From e3c16ab651b571495137a1c9e2537f33debcf505 Mon Sep 17 00:00:00 2001
From: Knative Automation
Date: Tue, 16 Dec 2025 13:27:39 +0000
Subject: [PATCH] upgrade to latest dependencies
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

bumping k8s.io/apiserver 92c4c2c...a1e5047:
> a1e5047 Update dependencies to v0.34.3 tag
> a978f35 Merge pull request # 135343 from michaelasp/automated-cherry-pick-of-# 135327-upstream-release-1.34
> 7b9813a Merge pull request # 135442 from lalitc375/automated-cherry-pick-of-# 135359-origin-release-1.34
> 4cb47d3 Fix alpha API warnings for patch version differences
> 7b74a6d Fallback to live ns lookup on admission if lister cannot find namespace

bumping go.opentelemetry.io/otel/exporters/otlp/otlptrace 84e3f3a...6ce1429:
> 6ce1429 Release v1.39.0 (# 7676)
> 12e421a sdk/log: move Enabled method from FilterProcessor to Processor (# 7639)
> 5982f16 fix(deps): update module golang.org/x/sys to v0.39.0 (# 7684)
> 9288378 chore(deps): update module golang.org/x/sync to v0.19.0 (# 7683)
> ee3dfef chore(deps): update github.com/securego/gosec/v2 digest to 41f28e2 (# 7682)
> 9345d1f fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.7.2 (# 7680)
> d03b033 Check context prior to delaying retry in OTLP exporters (# 7678)
> 61765e7 Fix flaky `TestClientInstrumentation` (# 7677)
> a54721c chore(deps): update module github.com/go-git/go-billy/v5 to v5.7.0 (# 7679)
> 746d086 chore(deps): update github/codeql-action action to v4.31.7 (# 7675)
> 1bc9713 Regenerate `ErrorType` documentation in `semconv/v1.37.0` (# 7667)
> 5a692cf Fix whitespace in `semconv/v1.33.0` (# 7665)
> 4eff89b Fix whitespace in `semconv/v1.32.0` (# 7666)
> d1825df Fix whitespace in `semconv/v1.36.0` (# 7663)
> ddc307d Fix package name documentation and missing copyright in `semconv/v1.32.0` (# 7659)
> 3e85447 Fix whitespace in `semconv/v1.34.0` (# 7664)
> a64b9ec Fix package documentation name and return error in `semconv/v1.36.0` (# 7656)
> be85ff8 Fix whitespace in `semconv/v1.37.0` (# 7660)
> cddeb68 Fix package name documentation and missing copyright in `semconv/v1.34.0` (# 7657)
> 3659648 Fix package name documentation and missing copyright in `semconv/v1.33.0` (# 7658)
> e69beb8 Fix package documentation name and return err in `semconv/v1.37.0` (# 7655)
> ddd0420 fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.7.1 (# 7671)
> 21e75b9 chore(deps): update module github.com/ldez/gomoddirectives to v0.8.0 (# 7669)
> ca53078 Instrument the `SimpleLogProcessor` from sdk/log (# 7548)
> df83919 chore(deps): update module github.com/spf13/cobra to v1.10.2 (# 7668)
> 6af2f2f Generate semconv/v1.38.0 (# 7648)
> 20fdce2 fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.7.0 (# 7661)
> 79144c5 chore(deps): update golang.org/x/telemetry digest to 8fff8a5 (# 7654)
> d6aef9b chore(deps): update actions/checkout action to v6.0.1 (# 7651)
> 98f784d fix(deps): update googleapis to ff82c1b (# 7652)
> 5ea2a32 chore(deps): update actions/stale action to v10.1.1 (# 7653)
> 85c1dfe chore(deps): update module github.com/godoc-lint/godoc-lint to v0.10.2 (# 7650)
> 729bd6e fix(deps): update module go.opentelemetry.io/collector/pdata to v1.47.0 (# 7646)
> d5faa3e chore(deps): update github/codeql-action action to v4.31.6 (# 7644)
> 495a2c2 chore(deps): update golang.org/x/telemetry digest to abf20d0 (# 7643)
> a72103c chore(deps): update module github.com/hashicorp/go-version to v1.8.0 (# 7641)
> a0a0acd fix(deps): update golang.org/x to 87e1e73 (# 7636)
> c152342 fix(deps): update googleapis to 79d6a2a (# 7634)
> 6a16d13 chore(deps): update golang.org/x/telemetry digest to 55bbf37 (# 7633)
> d3b4232 chore(deps): update module github.com/tomarrell/wrapcheck/v2 to v2.12.0 (# 7632)
> cbcbb2d chore(deps): update github/codeql-action action to v4.31.5 (# 7631)
> 414432d chore(deps): update module github.com/go-git/go-git/v5 to v5.16.4 (# 7629)
> 7323fc7 chore(deps): update golang.org/x/telemetry digest to e487659 (# 7619)
> d799e06 chore(deps): update module github.com/prometheus/common to v0.67.4 (# 7626)
> c6e4cca chore(deps): update module dev.gaijin.team/go/golib to v0.8.0 (# 7627)
> da01323 otlploghttp: support OTEL_EXPORTER_OTLP_LOGS_INSECURE and OTEL_EXPORTER_OTLP_INSECURE env vars (# 7608)
> 4200d1e chore(deps): update actions/checkout action to v6 (# 7624)
> 67d264a chore(deps): update module golang.org/x/crypto to v0.45.0 [security] (# 7622)
> 7d39542 chore(deps): update module github.com/cyphar/filepath-securejoin to v0.6.1 (# 7618)
> 9c98f52 chore(deps): update module go.uber.org/zap to v1.27.1 (# 7620)
> 61649a4 chore(deps): update actions/setup-go action to v6.1.0 (# 7623)
> 4929285 Replace fnv with xxhash (# 7497)
> 98eb065 chore(deps): update github/codeql-action action to v4.31.4 (# 7615)
> 5eea765 Upgrade macos tests to latest (# 7597)
> dafdbf8 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.46.0 (# 7611)
> 205b584 chore(deps): update module github.com/prometheus/common to v0.67.3 (# 7613)
> dfffbcf fix(deps): update module google.golang.org/grpc to v1.77.0 (# 7612)
> e2f25cd chore(deps): update actions/checkout action to v5.0.1 (# 7609)
> af4399f chore(deps): update module go.opentelemetry.io/collector/featuregate to v1.46.0 (# 7610)
> 48eaa6c fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.6.2 (# 7604)
> e35220d chore(deps): update module github.com/mgechev/revive to v1.13.0 (# 7605)
> d75bcb2 chore(deps): update github/codeql-action action to v4.31.3 (# 7603)
> 6142eb6 fix(deps): update golang.org/x to e25ba8c (# 7602)
> 8c2fb6f chore: exporters/prometheus/internal/x - Generate x package from x component template (# 7491)
> cb5b1b6 fix(deps): update module golang.org/x/tools to v0.39.0 (# 7601)
> bfb946a chore(deps): update golang.org/x/telemetry digest to 03ef243 (# 7600)
> cbe16b6 fix(deps): update googleapis to 95abcf5 (# 7598)
> 19a640a chore(deps): update golang.org/x (# 7599)
> 1411938 fix(deps): update googleapis to 83f4791 (# 7594)
> d1ebd7b fix(deps): update golang.org/x (# 7590)
> bcf8234 chore(deps): update module github.com/catenacyber/perfsprint to v0.10.1 (# 7588)
> 5c3ba32 chore(deps): update module github.com/maratori/testpackage to v1.1.2 (# 7586)
> dff05f9 chore(deps): update module github.com/maratori/testableexamples to v1.0.1 (# 7585)
> 093cdb6 chore(deps): update golang.org/x/telemetry digest to 5cc343d (# 7584)
> fe47da3 sdk/log: update ExampleProcessor_eventName to use otel.event.name attribute (# 7568)
> 13b122c chore(deps): update golang.org/x/telemetry digest to cbe4531 (# 7582)
> ac5fbd7 chore(deps): update module github.com/mirrexone/unqueryvet to v1.3.0 (# 7583)
> a80ee2d feat: instrument periodic reader from sdk/metric (# 7571)
> 22b5704 chore(deps): update otel/weaver docker tag to v0.19.0 (# 7580)
> 6120ee8 trace: add fuzz tests for TraceIDFromHex and SpanIDFromHex (# 7577)
> ac5b181 fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.6.1 (# 7579)
> 389571b chore(deps): update golang.org/x/telemetry digest to ab4e49a (# 7578)
> 69dfcc5 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.45.0 (# 7575)
> 38203b6 docs: sign artifacts before releasing (# 7567)
> 635cf8d docs: remove demo repository update after release (# 7566)
> 68190cc Instrument manual reader from sdk/metric (# 7524)
> 4438ec3 fix(deps): update module go.opentelemetry.io/proto/otlp to v1.9.0 (# 7570)
> 0e4d4ed fix(deps): update googleapis to f26f940 (# 7569)
> d0483a7 chore(deps): update module github.com/cyphar/filepath-securejoin to v0.6.0 (# 7564)
> 8a930a9 chore(deps): update module github.com/cyphar/filepath-securejoin to v0.5.1 (# 7563)
> adbaa43 fix(deps): update build-tools to v0.29.0 (# 7561)
> c1dc104 chore(deps): update module github.com/charmbracelet/x/term to v0.2.2 (# 7560)
> e308db8 chore: handle Float64Histogram in log/observe errMeter (# 7555)
> 5616ce4 chore(deps): update module github.com/go-critic/go-critic to v0.14.2 (# 7559)
> d7ceecf fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.6.0 (# 7554)
> 893166a chore(deps): update module github.com/prometheus/procfs to v0.19.2 (# 7558)
> 6fe90a7 chore(deps): update github/codeql-action action to v4.31.2 (# 7556)
> 5ab687f chore(deps): update module github.com/go-critic/go-critic to v0.14.1 (# 7557)
> c3eda34 Add disclaimer to span.SetAttributes (# 7550)
> 8b61c33 chore(deps): update lycheeverse/lychee-action action to v2.7.0 (# 7552)
> 9401e21 fix(deps): update googleapis to ab9386a (# 7553)
> b209eca chore(deps): update module github.com/stbenjam/no-sprintf-host-port to v0.3.1 (# 7551)
> 89da8a1 chore(deps): update module github.com/prometheus/common to v0.67.2 (# 7547)
> ab21764 chore(deps): update golang.org/x/telemetry digest to d7a2859 (# 7546)
> 82c8953 chore(deps): update module github.com/karamaru-alpha/copyloopvar to v1.2.2 (# 7545)
> 0b0a4d3 chore(deps): update mvdan.cc/unparam digest to 5beb8c8 (# 7544)
> c534ddd chore(deps): update module github.com/clipperhouse/uax29/v2 to v2.3.0 (# 7543)
> 508eb2e chore(deps): update module github.com/prometheus/procfs to v0.19.1 (# 7542)
> bd5db0f chore(deps): update module github.com/ashanbrown/forbidigo/v2 to v2.3.0 (# 7540)
> c9b7ecf chore(deps): update module github.com/ashanbrown/makezero/v2 to v2.1.0 (# 7538)
> af4f6ae chore(deps): update module github.com/prometheus/procfs to v0.19.0 (# 7539)
> c434d43 chore(deps): update github/codeql-action action to v4.31.0 (# 7536)
> 3ecd36a chore(deps): update github artifact actions (major) (# 7537)
> e71de16 chore(deps): update module github.com/charithe/durationcheck to v0.0.11 (# 7534)
> b15942f Added the `internal/observ` package to log (# 7532)
> f1ba319 fix(deps): update golang.org/x to a4bb9ff (# 7533)
> f0c2457 chore(deps): update golang.org/x/telemetry digest to 5be28d7 (# 7528)
> 6f7ffc1 fix(deps): update googleapis to 3a174f9 (# 7529)
> 060af76 chore(deps): update module github.com/prometheus/procfs to v0.18.0 (# 7530)
> bc28867 Move scorpionknifes to emeritus (# 7526)
> 7958d6f chore(deps): update module mvdan.cc/gofumpt to v0.9.2 (# 7527)
> eadb423 Instrument the `otlploghttp` exporter (# 7512)
> eb87c43 chore(deps): update module github.com/abirdcfly/dupword to v0.1.7 (# 7525)
> 26ce126 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.44.0 (# 7523)
> 713012c chore(deps): update module go.opentelemetry.io/collector/featuregate to v1.44.0 (# 7522)
> f0fefb9 fix(deps): update googleapis to 88f65dc (# 7521)
> 56138d1 fix(deps): update golang.org/x (# 7482)
> eb7ec3e Simulate failures for histogram creation paths without risking a nil-interface panic (# 7518)
> f7d2882 feat: sdk/trace: span processed metric for simple span processor (# 7374)
> 2e5fdd1 chore(deps): update github/codeql-action action to v4.30.9 (# 7515)
> b5b6989 sdk/log: Fix AddAttributes, SetAttributes, SetBody on Record to not mutate input (# 7403)
> f346dec chore: sdk/internal/x - generate x package from shared template (# 7495)
> b37822b Prometheus exporter tests: iterate through all scopes rather than looking only at the first (# 7510)
> 9dea78c chore(deps): update module github.com/go-critic/go-critic to v0.14.0 (# 7509)
> ee0c203 fix(observ): correct rejected items and update comment style (# 7502)
> 86e673c chore(deps): update module github.com/godoc-lint/godoc-lint to v0.10.1 (# 7508)
> 0e18cf4 fix(deps): update googleapis to 4626949 (# 7506)
> b78550d Added the `internal/observ` package to otlploghttp (# 7484)
> b3129d3 docs: remove demo-accounting service from dependency list in releasing (# 7503)
> 643e735 chore(deps): update module github.com/kunwardeep/paralleltest to v1.0.15 (# 7501)
> 1935e60 Fix typos and linguistic errors in documentation / hacktoberfest (# 7494)
> fa8e48b OTLP trace exporter include W3C trace flags (bits 0–7) in Span.Flags (# 7438)
> 07a26be chore(deps): update module github.com/catenacyber/perfsprint to v0.10.0 (# 7496)
> 73cbc69 chore(deps): update github/codeql-action action to v4.30.8 (# 7489)
> f58f79b Instrument the `otlptracehttp` exporter (# 7486)
> 5c78f7c chore(deps): update module github.com/gofrs/flock to v0.13.0 (# 7487)
> 691638a Move sdk/internal/env to sdk/trace/internal/env (# 7437)
> c8fc171 Add the `internal/observ` pkg to `otlptracehttp` (# 7480)
> 874c4c3 feat: Improve error handling in prometheus exporter (# 7363)
> a817caa Add a version const to otlptracehttp (# 7479)
> 04ea40e Add the internal `x` package to `otlptracehttp` (# 7476)
> 8e8c8c8 chore(deps): update module github.com/ldez/exptostd to v0.4.5 (# 7483)
> f89d9f9 chore(deps): update module github.com/nunnatsa/ginkgolinter to v0.21.2 (# 7481)
> dc8303b chore(deps): update golang.org/x (# 7477)
> 0c97dfd fix(deps): update golang.org/x (# 7475)
> 798133c chore(deps): update module github.com/skeema/knownhosts to v1.3.2 (# 7471)
> 12bae3a chore(deps): update github/codeql-action action to v4 (# 7472)
> 7375147 chore(deps): update module golang.org/x/net to v0.45.0 (# 7470)
> 8f0e60d fix(deps): update google.golang.org/genproto/googleapis/rpc digest to 49b9836 (# 7469)
> c692bc4 Instrument the `otlptracegrpc` exporter (# 7459)
> ce38247 chore(deps): update google.golang.org/genproto/googleapis/api digest to 49b9836 (# 7468)
> dd9576c chore(deps): update github/codeql-action action to v3.30.7 (# 7467)
> 5f5f4af Document the ordering guarantees provided by the metrics SDK (# 7453)
> b64883d chore(deps): update module github.com/prometheus/common to v0.67.1 (# 7465)
> 78548fb chore(deps): update module github.com/stretchr/objx to v0.5.3 (# 7464)
> c8e3897 Use sync.Map and atomics to improve sum performance (# 7427)
> cfd8570 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.43.0 (# 7462)
> 681f607 fix(deps): update module google.golang.org/grpc to v1.76.0 (# 7463)
> 94f243d chore(deps): update module go.opentelemetry.io/collector/featuregate to v1.43.0 (# 7461)
> 11260cd fix(deps): update googleapis to 65f7160 (# 7460)
> 14d6372 Add the `internal/observ` package to `otlptracegrpc` (# 7404)
> a10652b sdk/trace: trace id high 64 bit tests (# 7212)
> 5937fc8 chore(deps): update module github.com/bombsimon/wsl/v5 to v5.3.0 (# 7457)
> a39337f chore(deps): update module github.com/go-git/go-git/v5 to v5.16.3 (# 7456)
> 64d2bed fix(deps): update build-tools to v0.28.1 (# 7455)
> c4bdd87 Support custom error type semantics (# 7442)
> 931a5bd chore(deps): update actions/stale action to v10.1.0 (# 7452)
> cf1f668 chore(deps): update module github.com/ghostiam/protogetter to v0.3.17 (# 7451)
> bd1b3da Add exemplar reservoir parallel benchmarks (# 7441)
> dc906d6 chore(deps): update module github.com/grpc-ecosystem/grpc-gateway/v2 to v2.27.3 (# 7450)
> 3757239 fix(deps): update googleapis to 7c0ddcb (# 7449)
> 7352831 fix(deps): update golang.org/x to 27f1f14 (# 7448)
> 5dd35ce feat: logs SDK observability - otlploggrpc exporter metrics (# 7353)
> 4b9e111 Skip link checking for acm.org which blocks the link checker (# 7444)
> 48cbdac chore(deps): update github/codeql-action action to v3.30.6 (# 7446)
> 3ea8606 fix(deps): update module google.golang.org/protobuf to v1.36.10 (# 7445)
> dee11e6 Add temporality selector functions (# 7434)
> ffeeee8 chore(deps): update peter-evans/create-issue-from-file action to v6 (# 7440)
> 862a41a chore(deps): update golang.org/x/telemetry digest to 4eae98a (# 7439)
> ea38204 Allow optimizing locking for built-in exemplar reservoirs (# 7423)
> 6c54ef6 chore(deps): update ossf/scorecard-action action to v2.4.3 (# 7435)
> 4d2b735 chore(deps): update golang.org/x/telemetry digest to 8e64475 (# 7431)
> e54c038 chore(deps): update module github.com/charmbracelet/x/ansi to v0.10.2 (# 7432)
> addcd63 fix(deps): update googleapis to 57b25ae (# 7429)
> 45539cf Only enforce cardinality limits when the attribute set does not already exist (# 7422)
> 59ac46c Prometheus exporter: change default translation strategy (# 7421)
> 692e519 chore(deps): update module github.com/mattn/go-runewidth to v0.0.19 (# 7428)
> 6cb0e90 Generate gRPC Client target parsing func (# 7424)
> 81aeace chore(deps): update module go.augendre.info/fatcontext to v0.9.0 (# 7426)
> 0db5ac7 sdk/trace/internal/x: generate x package from x component template # 7385 (# 7411)
> fef6ee5 chore(deps): update github/codeql-action action to v3.30.5 (# 7425)
> 22cfbce Add concurrent safe tests for metric aggregations (# 7379)
> fc89784 chore(deps): update module github.com/cyphar/filepath-securejoin to v0.5.0 (# 7419)
> bdd4881 chore(deps): update module github.com/mattn/go-runewidth to v0.0.17 (# 7418)
> ac8d8e9 Optimize Observability return types in in Prometheus exporter (# 7410)
> 88d3fed Optimize the return type of ExportSpans (# 7405)
> 63ed041 chore(deps): update module github.com/quasilyte/go-ruleguard/dsl to v0.3.23 (# 7417)
> 016b175 chore(deps): update module github.com/quasilyte/go-ruleguard to v0.4.5 (# 7416)
> a883fa1 chore(deps): update github/codeql-action action to v3.30.4 (# 7414)
> 97a78c1 Add measure benchmarks with exemplars recorded (# 7406)
> 2e0b5b4 Add benchmark for synchronous gauge measurement (# 7407)
> 97e2244 [chore]: Clean-up unused obsScopeName const (# 7408)
> b85e2c1 chore(deps): update actions/cache action to v4.3.0 (# 7409)
> 250a11e Add experimental `x` package to `otlptracegrpc` (# 7401)
> 466f0cd chore(deps): update module dev.gaijin.team/go/golib to v0.7.0 (# 7402)
> 3f05c91 chore(deps): update module github.com/ldez/gomoddirectives to v0.7.1 (# 7400)
> 4c9c611 Link checker: ignore https localhost uris (# 7399)
> 0cc2eb9 sdk/log: BenchmarkAddAttributes, BenchmarkSetAttributes, BenchmarkSetBody (# 7387)
> 80cb909 refactor: replace `context.Background()` with `t.Context()`/`b.Context()` in tests (# 7352)
> 2389f44 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.42.0 (# 7397)
> 5d3ce38 fix(deps): update googleapis to 9219d12 (# 7393)
> dd938b2 fix(deps): update build-tools to v0.28.0 (# 7395)
> 4e3152d chore(deps): update module go.opentelemetry.io/build-tools to v0.28.0 (# 7394)
> 56498ab chore: sdk/log/internal/x - generate x package from x component template (# 7389)
> a579a3e fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.5.0 (# 7392)
> de1563e chore(deps): update module github.com/tetafro/godot to v1.5.4 (# 7391)
> b5d5bba chore(deps): update module github.com/sagikazarmark/locafero to v0.12.0 (# 7390)
> a189c6b sdk/log: add TestRecordMethodsInputConcurrentSafe (# 7378)
> 6180f83 Return partial OTLP export errors to the caller (# 7372)
> 60f9f39 feat(prometheus): Add observability for prometheus exporter (# 7345)
> d1dddde fix(deps): update github.com/opentracing-contrib/go-grpc/test digest to a6e64aa (# 7375)
> e4bb37c chore(deps): update otel/weaver docker tag to v0.18.0 (# 7377)
> be8e7b2 chore(deps): update module github.com/kulti/thelper to v0.7.1 (# 7376)
> b866e36 chore(deps): update module github.com/ldez/grignotin to v0.10.1 (# 7373)
> 9d52bde Use Set hash in Distinct (2nd attempt) (# 7175)
> 666f95c Fix the typo in test names (# 7369)
> 1298533 chore(deps): update module github.com/djarvur/go-err113 to v0.1.1 (# 7368)
> 18b114b fix(deps): update module github.com/prometheus/otlptranslator to v1 (# 7358)
> 7e4006a chore: generate feature flag files from shared (# 7361)
> 5b808c6 Encapsulate SDK Tracer observability (# 7331)
> e4ab314 Encapsulate SDK BatchSpanProcessor observability (# 7332)
> 6243f21 fix(deps): update module go.opentelemetry.io/auto/sdk to v1.2.1 (# 7365)
> 4fdd552 Track context containing span in `recordingSpan` (# 7354)
> 59563f7 Do not use the user-defined empty set when comparing sets. (# 7357)
> 01611d3 chore(deps): update module github.com/tetafro/godot to v1.5.2 (# 7360)
> 305ec06 chore(deps): update module github.com/nunnatsa/ginkgolinter to v0.21.0 (# 7362)
> 0d5aa14 chore(deps): update module github.com/antonboom/testifylint to v1.6.4 (# 7359)
> 285cbe9 sdk/metric: add example for metricdatatest package (# 7323)
> e2da30d trace,metric,log: change WithInstrumentationAttributes to not de-depuplicate the passed attributes in a closure (# 7266)
> b168550 fix(deps): update golang.org/x to df92998 (# 7350)
> 7fdebbe Rename Self-Observability as just Observability (# 7302)
> b06d273 chore(deps): update github/codeql-action action to v3.30.3 (# 7348)
> 38d7c39 chore(deps): update module go.yaml.in/yaml/v2 to v2.4.3 (# 7349)
> 31629e2 fix(deps): update module google.golang.org/grpc to v1.75.1 (# 7344)
> 6d1f9d0 fix(deps): update module golang.org/x/tools to v0.37.0 (# 7347)
> df6058a chore(deps): update module golang.org/x/net to v0.44.0 (# 7341)
> e26cebf chore(deps): update module github.com/antonboom/nilnil to v1.1.1 (# 7343)
> 3baabce Do not allocate instrument options if possible in generated semconv packages (# 7328)
> 9b6585a Encapsulate `stdouttrace.Exporter` instrumentation in internal package (# 7307)
> a07b7e6 fix(deps): update module google.golang.org/protobuf to v1.36.9 (# 7340)
> 71fda10 chore(deps): update golang.org/x (# 7326)
> f2ea3f1 chore(deps): update github/codeql-action action to v3.30.2 (# 7339)
> 6f50705 fix(deps): update googleapis to 9702482 (# 7335)
> c4a6339 chore(deps): update module github.com/spf13/cast to v1.10.0 (# 7333)
> 4368300 chore(deps): update module github.com/spf13/viper to v1.21.0 (# 7334)
> d0b6f18 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.41.0 (# 7337)
> 81c1aca chore(deps): update module github.com/antonboom/errname to v1.1.1 (# 7338)
> eb9fd73 chore(deps): update module github.com/lucasb-eyer/go-colorful to v1.3.0 (# 7327)
> e759fbd chore(deps): update module github.com/sagikazarmark/locafero to v0.11.0 (# 7329)
> cec9d59 chore(deps): update module github.com/spf13/afero to v1.15.0 (# 7330)
> 07a91dd trace,metric,log: add WithInstrumentationAttributeSet option (# 7287)
> b335c07 Encapsulate observability in Logs SDK (# 7315)
> dcf14aa trace,metric,log: WithInstrumentationAttributes options to merge attributes (# 7300)
> 63f1ee7 chore(deps): update module mvdan.cc/gofumpt to v0.9.1 (# 7322)
> 6f04175 chore(deps): update golang.org/x (# 7324)
> 567ef26 Add benchmark for exponential histogram measurements (# 7305)
> 8ac554a fix(deps): update golang.org/x (# 7320)
> b218e4b Don't track min and max when disabled (# 7306)
> 810095e chore(deps): update benchmark-action/github-action-benchmark action to v1.20.7 (# 7319)
> f8a9510 fix(deps): update module github.com/prometheus/client_golang to v1.23.2 (# 7314)
> 8cea039 chore(deps): update golang.org/x/telemetry digest to af835b0 (# 7313)
> 4a0606d chore(deps): update module github.com/pjbgf/sha1cd to v0.5.0 (# 7317)
> 2de26d1 chore(deps): update github.com/grafana/regexp digest to f7b3be9 (# 7311)
> 97c4e6c chore(deps): update github/codeql-action action to v3.30.1 (# 7312)
> e2a4fb3 chore(deps): update golang.org/x/telemetry digest to 9b996f7 (# 7308)
> de4b553 chore(deps): update module github.com/bombsimon/wsl/v5 to v5.2.0 (# 7309)
> a5dcd68 Add Observability section to CONTRIBUTING doc (# 7272)
> 4107421 chore(deps): update codecov/codecov-action action to v5.5.1 (# 7303)
> d8d6e52 fix(deps): update module github.com/prometheus/client_golang to v1.23.1 (# 7304)
> e54519a chore(deps): update actions/setup-go action to v6 (# 7298)
> a0cc03c chore(deps): update actions/stale action to v10 (# 7299)
> 8c8cd0a fix(deps): update module go.opentelemetry.io/proto/otlp to v1.8.0 (# 7296)
> 83c041a chore(deps): update module mvdan.cc/gofumpt to v0.9.0 (# 7292)
> 295fbdc chore(deps): update module github.com/golangci/go-printf-func-name to v0.1.1 (# 7290)
> 09e5d69 chore(deps): update module github.com/ghostiam/protogetter to v0.3.16 (# 7289)
> 7cb19f9 chore(deps): update benchmark-action/github-action-benchmark action to v1.20.5 (# 7293)
> b8f00e3 chore(deps): update module github.com/spf13/pflag to v1.0.10 (# 7291)
> 0174808 Fix schema urls (# 7288)
> 5e3b939 Add tracetest example for testing instrumentation (# 7107)
> 090e9ef chore(deps): update module github.com/spf13/cobra to v1.10.1 (# 7286)
> a389393 chore(deps): update github/codeql-action action to v3.30.0 (# 7284)
> 6ccc387 chore(deps): update module github.com/spf13/cobra to v1.10.0 (# 7285)
> 774c740 Fix missing link in changelog (# 7273)
> 5d1ec3a fix(deps): update github.com/opentracing-contrib/go-grpc/test digest to 0261db7 (# 7278)
> f74ab34 chore(deps): update module github.com/spf13/pflag to v1.0.9 (# 7282)
> b0903af chore(deps): update module github.com/rogpeppe/go-internal to v1.14.1 (# 7283)
> 358fa01 fix(deps): update googleapis to ef028d9 (# 7279)
> 68b1c4c fix(deps): update module github.com/opentracing-contrib/go-grpc to v0.1.2 (# 7281)
> c8632bc fix(deps): update golang.org/x (# 7188)
> 18ad4a1 fix(deps): update module github.com/golangci/golangci-lint/v2 to v2.4.0 (# 7277)
> c2fea5f chore(deps): update module github.com/securego/gosec/v2 to v2.22.8 (# 7276)
> 83403d3 fix(deps): update module go.opentelemetry.io/collector/pdata to v1.40.0 (# 7275)
> 8ab8e42 Drop support for Go 1.23 (# 7274)

Signed-off-by: Knative Automation
---
 go.mod | 68 +-
 go.sum | 140 +-
 .../grpc-gateway/v2/runtime/context.go | 6 +-
 .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 6 +-
 .../grpc-gateway/v2/runtime/mux.go | 16 +-
 .../prometheus/common/expfmt/decode.go | 14 +-
 .../common/expfmt/openmetrics_create.go | 59 +-
 .../prometheus/common/expfmt/text_create.go | 58 +-
 .../prometheus/common/expfmt/text_parse.go | 102 +-
 .../prometheus/procfs/.golangci.yml | 28 +-
 vendor/github.com/prometheus/procfs/Makefile | 2 +-
 .../prometheus/procfs/Makefile.common | 2 +-
 vendor/github.com/prometheus/procfs/arp.go | 9 +-
 .../github.com/prometheus/procfs/buddyinfo.go | 10 +-
 .../github.com/prometheus/procfs/cmdline.go | 2 +-
 .../github.com/prometheus/procfs/cpuinfo.go | 2 +-
 .../prometheus/procfs/cpuinfo_armx.go | 2 +-
 .../prometheus/procfs/cpuinfo_loong64.go | 2 +-
 .../prometheus/procfs/cpuinfo_mipsx.go | 2 +-
 .../prometheus/procfs/cpuinfo_others.go | 2 +-
 .../prometheus/procfs/cpuinfo_ppcx.go | 2 +-
 .../prometheus/procfs/cpuinfo_riscvx.go | 2 +-
 .../prometheus/procfs/cpuinfo_s390x.go | 2 +-
 .../prometheus/procfs/cpuinfo_x86.go | 2 +-
 vendor/github.com/prometheus/procfs/crypto.go | 2 +-
 vendor/github.com/prometheus/procfs/doc.go | 2 +-
 vendor/github.com/prometheus/procfs/fs.go | 2 +-
 .../prometheus/procfs/fs_statfs_notype.go | 2 +-
 .../prometheus/procfs/fs_statfs_type.go | 2 +-
 .../github.com/prometheus/procfs/fscache.go | 9 +-
 .../prometheus/procfs/internal/fs/fs.go | 2 +-
 .../prometheus/procfs/internal/util/parse.go | 2 +-
 .../procfs/internal/util/readfile.go | 2 +-
 .../procfs/internal/util/sysreadfile.go | 2 +-
 .../internal/util/sysreadfile_compat.go | 2 +-
 .../procfs/internal/util/valueparser.go | 2 +-
 vendor/github.com/prometheus/procfs/ipvs.go | 2 +-
 .../prometheus/procfs/kernel_hung.go | 45 +
 .../prometheus/procfs/kernel_random.go | 2 +-
 .../github.com/prometheus/procfs/loadavg.go | 2 +-
 vendor/github.com/prometheus/procfs/mdstat.go | 118 +-
 .../github.com/prometheus/procfs/meminfo.go | 4 +-
 .../github.com/prometheus/procfs/mountinfo.go | 23 +-
 .../prometheus/procfs/mountstats.go | 4 +-
 .../prometheus/procfs/net_conntrackstat.go | 2 +-
 .../github.com/prometheus/procfs/net_dev.go | 2 +-
 .../prometheus/procfs/net_dev_snmp6.go | 7 +-
 .../prometheus/procfs/net_ip_socket.go | 2 +-
 .../prometheus/procfs/net_protocols.go | 4 +-
 .../github.com/prometheus/procfs/net_route.go | 2 +-
 .../prometheus/procfs/net_sockstat.go | 5 +-
 .../prometheus/procfs/net_softnet.go | 2 +-
 .../github.com/prometheus/procfs/net_tcp.go | 2 +-
 .../prometheus/procfs/net_tls_stat.go | 2 +-
 .../github.com/prometheus/procfs/net_udp.go | 2 +-
 .../github.com/prometheus/procfs/net_unix.go | 2 +-
 .../prometheus/procfs/net_wireless.go | 2 +-
 .../github.com/prometheus/procfs/net_xfrm.go | 2 +-
 .../github.com/prometheus/procfs/netstat.go | 2 +-
 .../prometheus/procfs/nfnetlink_queue.go | 85 +
 vendor/github.com/prometheus/procfs/proc.go | 4 +-
 .../prometheus/procfs/proc_cgroup.go | 2 +-
 .../prometheus/procfs/proc_cgroups.go | 8 +-
 .../prometheus/procfs/proc_environ.go | 2 +-
 .../prometheus/procfs/proc_fdinfo.go | 13 +-
 .../prometheus/procfs/proc_interrupts.go | 2 +-
 .../github.com/prometheus/procfs/proc_io.go | 2 +-
 .../prometheus/procfs/proc_limits.go | 7 +-
 .../github.com/prometheus/procfs/proc_maps.go | 2 +-
 .../prometheus/procfs/proc_netstat.go | 2 +-
 .../github.com/prometheus/procfs/proc_ns.go | 2 +-
 .../github.com/prometheus/procfs/proc_psi.go | 2 +-
 .../prometheus/procfs/proc_smaps.go | 2 +-
 .../github.com/prometheus/procfs/proc_snmp.go | 2 +-
 .../prometheus/procfs/proc_snmp6.go | 2 +-
 .../github.com/prometheus/procfs/proc_stat.go | 2 +-
 .../prometheus/procfs/proc_statm.go | 4 +-
 .../prometheus/procfs/proc_status.go | 9 +-
 .../github.com/prometheus/procfs/proc_sys.go | 2 +-
 .../github.com/prometheus/procfs/schedstat.go | 2 +-
 vendor/github.com/prometheus/procfs/slab.go | 2 +-
 .../github.com/prometheus/procfs/softirqs.go | 2 +-
 vendor/github.com/prometheus/procfs/stat.go | 5 +-
 vendor/github.com/prometheus/procfs/swaps.go | 2 +-
 vendor/github.com/prometheus/procfs/thread.go | 2 +-
 vendor/github.com/prometheus/procfs/vm.go | 2 +-
 .../github.com/prometheus/procfs/zoneinfo.go | 8 +-
 .../auto/sdk/internal/telemetry/id.go | 2 +-
 .../auto/sdk/internal/telemetry/number.go | 2 +-
 .../auto/sdk/internal/telemetry/span.go | 70 +-
 .../auto/sdk/internal/telemetry/status.go | 10 +-
 .../auto/sdk/internal/telemetry/traces.go | 4 +-
 .../auto/sdk/internal/telemetry/value.go | 14 +-
 vendor/go.opentelemetry.io/auto/sdk/span.go | 25 +-
 vendor/go.opentelemetry.io/auto/sdk/tracer.go | 29 +-
 .../net/http/otelhttp/client.go | 15 +
 .../net/http/otelhttp/config.go | 19 +-
 .../instrumentation/net/http/otelhttp/doc.go | 3 +-
 .../net/http/otelhttp/handler.go | 7 +-
 .../http/otelhttp/internal/semconv/client.go | 305 +
 .../net/http/otelhttp/internal/semconv/env.go | 248 -
 .../net/http/otelhttp/internal/semconv/gen.go | 8 +-
 .../otelhttp/internal/semconv/httpconv.go | 517 -
 .../http/otelhttp/internal/semconv/server.go | 403 +
 .../net/http/otelhttp/version.go | 2 +-
 .../contrib/instrumentation/runtime/doc.go | 29 -
 .../instrumentation/runtime/producer.go | 4 +
 .../instrumentation/runtime/runtime.go | 29 +
 .../instrumentation/runtime/version.go | 2 +-
 .../go.opentelemetry.io/otel/.codespellignore | 1 +
 vendor/go.opentelemetry.io/otel/.golangci.yml | 3 +
 vendor/go.opentelemetry.io/otel/.lycheeignore | 6 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 72 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 458 +-
 vendor/go.opentelemetry.io/otel/Makefile | 3 +-
 vendor/go.opentelemetry.io/otel/README.md | 11 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 54 +-
 vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +-
 .../otel/attribute/encoder.go | 2 +-
 .../otel/attribute/hash.go | 92 +
 .../otel/attribute/internal/xxhash/xxhash.go | 64 +
 .../go.opentelemetry.io/otel/attribute/set.go | 145 +-
 .../otel/attribute/type_string.go | 5 +-
 .../otel/baggage/baggage.go | 12 +-
 .../otel/dependencies.Dockerfile | 2 +-
 .../otlp/otlpmetric/otlpmetricgrpc/client.go | 11 +-
 .../otlpmetric/otlpmetricgrpc/internal/gen.go | 2 +-
 .../internal/oconf/envconfig.go | 29 +-
 .../otlpmetricgrpc/internal/partialsuccess.go | 11 +
 .../otlpmetricgrpc/internal/retry/retry.go | 5 +
 .../otlp/otlpmetric/otlpmetricgrpc/version.go | 2 +-
 .../otlp/otlpmetric/otlpmetrichttp/client.go | 11 +-
 .../otlpmetric/otlpmetrichttp/internal/gen.go | 2 +-
 .../internal/oconf/envconfig.go | 29 +-
 .../otlpmetrichttp/internal/partialsuccess.go | 11 +
 .../otlpmetrichttp/internal/retry/retry.go | 5 +
 .../otlp/otlpmetric/otlpmetrichttp/version.go | 2 +-
 .../otlptrace/internal/tracetransform/span.go | 16 +-
 .../otlp/otlptrace/otlptracegrpc/client.go | 39 +-
 .../otlptracegrpc/internal/counter/counter.go | 31 +
 .../otlptrace/otlptracegrpc/internal/gen.go | 9 +
 .../otlptracegrpc/internal/observ/doc.go | 6 +
 .../internal/observ/instrumentation.go | 341 +
 .../otlptracegrpc/internal/observ/target.go | 143 +
 .../otlptracegrpc/internal/partialsuccess.go | 11 +
 .../otlptracegrpc/internal/retry/retry.go | 5 +
 .../otlptracegrpc/internal/version.go | 8 +
 .../otlptracegrpc/internal/x/README.md | 36 +
 .../otlptracegrpc/internal/x/observ.go | 22 +
 .../otlptrace/otlptracegrpc}/internal/x/x.go | 45 +-
 .../otlp/otlptrace/otlptracehttp/client.go | 44 +-
 .../otlptracehttp/internal/counter/counter.go | 31 +
 .../otlptrace/otlptracehttp/internal/gen.go | 6 +
 .../internal/observ/instrumentation.go | 397 +
 .../otlptracehttp/internal/partialsuccess.go | 11 +
 .../otlptracehttp/internal/retry/retry.go | 5 +
 .../otlptracehttp/internal/version.go | 8 +
 .../otlptracehttp/internal/x/observ.go | 22 +
 .../otlptrace/otlptracehttp/internal/x/x.go | 58 +
 .../otel/exporters/otlp/otlptrace/version.go | 2 +-
 .../otel/exporters/prometheus/config.go | 30 +-
 .../otel/exporters/prometheus/errors.go | 13 +
 .../otel/exporters/prometheus/exporter.go | 215 +-
 .../prometheus/internal/counter/counter.go | 31 +
 .../otel/exporters/prometheus/internal/gen.go | 12 +
 .../internal/observ/instrumentation.go | 249 +
 .../exporters/prometheus/internal/version.go | 9 +
 .../prometheus}/internal/x/README.md | 18 +-
 .../prometheus/internal/x/features.go | 22 +
 .../otel/exporters/prometheus/internal/x/x.go | 58 +
 .../stdout/stdouttrace/internal/gen.go | 12 +
 .../internal/observ/instrumentation.go | 214 +
 .../stdout/stdouttrace/internal/version.go | 8 +
 .../stdout/stdouttrace/internal/x/README.md | 8 +-
 .../stdout/stdouttrace/internal/x/features.go | 23 +
 .../stdout/stdouttrace/internal/x/x.go | 41 +-
 .../exporters/stdout/stdouttrace/trace.go | 148 +-
 .../otel/internal/global/meter.go | 2 +-
 vendor/go.opentelemetry.io/otel/metric.go | 2 +-
 .../go.opentelemetry.io/otel/metric/config.go | 38 +-
 .../otel/propagation/trace_context.go | 2 +-
 .../otel/sdk/internal/x/features.go | 39 +
 .../otel/sdk/internal/x/x.go | 46 +-
 .../otel/sdk/metric/doc.go | 16 +
 .../metric/exemplar/fixed_size_reservoir.go | 22 +-
 .../metric/exemplar/histogram_reservoir.go | 27 +-
 .../otel/sdk/metric/exemplar/storage.go | 14 +-
 .../otel/sdk/metric/instrumentkind_string.go | 5 +-
 .../metric/internal/aggregate/aggregate.go | 7 +-
 .../sdk/metric/internal/aggregate/atomic.go | 184 +
 .../aggregate/exponential_histogram.go | 21 +-
 .../internal/aggregate/filtered_reservoir.go | 28 +-
 .../metric/internal/aggregate/histogram.go | 71 +-
 .../metric/internal/aggregate/lastvalue.go | 16 +-
 .../sdk/metric/internal/aggregate/limit.go | 2 +-
 .../otel/sdk/metric/internal/aggregate/sum.go | 239 +-
 .../metric/internal/observ/instrumentation.go | 168 +
 .../internal/reservoir/concurrent_safe.go | 11 +
 .../otel/sdk/metric/internal/reservoir/doc.go | 6 +
 .../otel/sdk/metric/manual_reader.go | 41 +-
 .../metric/metricdata/temporality_string.go | 5 +-
 .../otel/sdk/metric/periodic_reader.go | 34 +-
 .../otel/sdk/metric/reader.go | 32 +-
 .../otel/sdk/metric/version.go | 2 +-
 .../otel/sdk/resource/host_id_bsd.go | 1 -
 .../otel/sdk/resource/host_id_linux.go | 1 -
 .../otel/sdk/resource/host_id_unsupported.go | 1 -
 .../otel/sdk/resource/host_id_windows.go | 1 -
 .../otel/sdk/resource/os_release_unix.go | 1 -
 .../otel/sdk/resource/os_unix.go | 1 -
 .../otel/sdk/resource/os_unsupported.go | 1 -
 .../otel/sdk/trace/batch_span_processor.go | 107 +-
 .../go.opentelemetry.io/otel/sdk/trace/doc.go | 2 +-
 .../otel/sdk/{ => trace}/internal/env/env.go | 2 +-
 .../internal/observ/batch_span_processor.go | 119 +
 .../otel/sdk/trace/internal/observ/doc.go | 6 +
 .../internal/observ/simple_span_processor.go | 97 +
 .../otel/sdk/trace/internal/observ/tracer.go | 223 +
 .../otel/sdk/trace/provider.go | 48 +-
 .../otel/sdk/trace/simple_span_processor.go | 31 +-
 .../otel/sdk/trace/span.go | 27 +-
 .../otel/sdk/trace/span_limits.go | 2 +-
 .../otel/sdk/trace/tracer.go | 134 +-
 .../go.opentelemetry.io/otel/sdk/version.go | 2 +-
 .../otel/semconv/internal/http.go | 2 +-
 .../otel/semconv/v1.26.0/README.md | 3 -
 .../otel/semconv/v1.26.0/attribute_group.go | 8996 -----------------
 .../otel/semconv/v1.26.0/doc.go | 9 -
 .../otel/semconv/v1.26.0/exception.go | 9 -
 .../otel/semconv/v1.26.0/metric.go | 1307 ---
 .../otel/semconv/v1.26.0/schema.go | 9 -
 .../otel/semconv/v1.37.0/error_type.go | 47 +-
 .../otel/semconv/v1.37.0/goconv/metric.go | 167 +-
 .../otel/semconv/v1.37.0/httpconv/metric.go | 189 +-
 .../otel/semconv/v1.37.0/otelconv/metric.go | 312 +-
 .../go.opentelemetry.io/otel/trace/config.go | 45 +-
 vendor/go.opentelemetry.io/otel/trace/span.go | 4 +
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 27 +-
 .../proto/otlp/common/v1/common.pb.go | 23 +-
 .../proto/otlp/metrics/v1/metrics.pb.go | 40 +-
 .../proto/otlp/resource/v1/resource.pb.go | 3 +-
 .../proto/otlp/trace/v1/trace.pb.go | 36 +-
 vendor/golang.org/x/oauth2/oauth2.go | 3 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 3 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 2 +
 .../x/sys/unix/zerrors_linux_386.go | 2 +
 .../x/sys/unix/zerrors_linux_amd64.go | 2 +
 .../x/sys/unix/zerrors_linux_arm.go | 2 +
 .../x/sys/unix/zerrors_linux_arm64.go | 2 +
 .../x/sys/unix/zerrors_linux_loong64.go | 2 +
 .../x/sys/unix/zerrors_linux_mips.go | 2 +
 .../x/sys/unix/zerrors_linux_mips64.go | 2 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 2 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 2 +
 .../x/sys/unix/zerrors_linux_ppc.go | 2 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 2 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 2 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 2 +
 .../x/sys/unix/zerrors_linux_s390x.go | 2 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 2 +
 .../x/sys/unix/ztypes_netbsd_arm.go | 2 +-
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 99 +-
 .../grpc/balancer/pickfirst/pickfirst.go | 867 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go | 906 --
 .../grpc/balancer/roundrobin/roundrobin.go | 6 +-
 .../grpc/balancer_wrapper.go | 9 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 2 +-
 vendor/google.golang.org/grpc/clientconn.go | 11 +-
 .../grpc/credentials/credentials.go | 3 +-
 .../grpc/encoding/encoding.go | 20 +
 .../grpc/encoding/internal/internal.go | 28 +
 .../grpc/encoding/proto/proto.go | 20 +-
 .../grpc/experimental/stats/metricregistry.go | 33 +
 .../grpc/experimental/stats/metrics.go | 3 +
 .../grpc/health/grpc_health_v1/health.pb.go | 2 +-
 .../grpc/internal/buffer/unbounded.go | 1 +
 .../grpc/internal/channelz/trace.go | 2 +-
 .../grpc/internal/envconfig/envconfig.go | 14 +-
 .../grpc/internal/envconfig/xds.go | 11 +
 .../internal/grpcsync/callback_serializer.go | 22 +-
 .../delegatingresolver/delegatingresolver.go | 58 +-
 .../internal/stats/metrics_recorder_list.go | 10 +
 .../grpc/internal/stats/stats.go | 70 +
 .../grpc/internal/transport/client_stream.go | 32 +-
 .../grpc/internal/transport/controlbuf.go | 80 +-
 .../grpc/internal/transport/flowcontrol.go | 23 +-
 .../grpc/internal/transport/handler_server.go | 50 +-
 .../grpc/internal/transport/http2_client.go | 214 +-
 .../grpc/internal/transport/http2_server.go | 84 +-
 .../grpc/internal/transport/http_util.go | 174 +-
 .../grpc/internal/transport/server_stream.go | 13 +-
 .../grpc/internal/transport/transport.go | 90 +-
 .../google.golang.org/grpc/mem/buffer_pool.go | 16 +-
 .../grpc/mem/buffer_slice.go | 101 +-
 vendor/google.golang.org/grpc/preloader.go | 3 -
 vendor/google.golang.org/grpc/rpc_util.go | 16 +-
 vendor/google.golang.org/grpc/server.go | 100 +-
 vendor/google.golang.org/grpc/stream.go | 122 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../protobuf/internal/filedesc/desc.go | 52 +-
 .../protobuf/internal/filedesc/desc_init.go | 14 +
 .../protobuf/internal/filedesc/desc_lazy.go | 20 +
 .../protobuf/internal/filedesc/editions.go | 12 +-
 .../protobuf/internal/version/version.go | 2 +-
 .../client-go/tools/cache/controller.go | 23 +-
 .../client-go/tools/cache/delta_fifo.go | 3 +-
 .../k8s.io/client-go/tools/cache/reflector.go | 8 +-
 .../reflector_data_consistency_detector.go | 4 +-
 .../client-go/tools/cache/shared_informer.go | 11 +-
 .../client-go/tools/cache/the_real_fifo.go | 3 +-
 .../data_consistency_detector.go | 23 +-
 vendor/modules.txt | 134 +-
 313 files changed, 8596 insertions(+), 14385 deletions(-)
 create mode 100644 vendor/github.com/prometheus/procfs/kernel_hung.go
 create mode 100644 vendor/github.com/prometheus/procfs/nfnetlink_queue.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/hash.go
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go
 rename vendor/go.opentelemetry.io/otel/{sdk/trace => exporters/otlp/otlptrace/otlptracegrpc}/internal/x/x.go (55%)
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter/counter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/observ.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/x.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/errors.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/counter/counter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/gen.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go
 rename vendor/go.opentelemetry.io/otel/{sdk/trace => exporters/prometheus}/internal/x/README.md (64%)
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/features.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/x.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/gen.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/features.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go
 rename vendor/go.opentelemetry.io/otel/sdk/{ => trace}/internal/env/env.go (98%)
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
 delete mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/stats/stats.go

diff --git a/go.mod b/go.mod
index 80109901438..67f154e91af 100644
--- a/go.mod
+++ b/go.mod
@@ -30,29 +30,29 @@ require (
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/stretchr/testify v1.11.1
 	github.com/wavesoftware/go-ensure v1.0.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
-	go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0
-	go.opentelemetry.io/otel v1.38.0
-	go.opentelemetry.io/otel/metric v1.38.0
-	go.opentelemetry.io/otel/sdk v1.38.0
-	go.opentelemetry.io/otel/sdk/metric v1.38.0
-	go.opentelemetry.io/otel/trace v1.38.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
+	go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0
+	go.opentelemetry.io/otel v1.39.0
+	go.opentelemetry.io/otel/metric v1.39.0
+	go.opentelemetry.io/otel/sdk v1.39.0
+	go.opentelemetry.io/otel/sdk/metric v1.39.0
+	go.opentelemetry.io/otel/trace v1.39.0
 	go.uber.org/atomic v1.10.0
 	go.uber.org/multierr v1.11.0
 	go.uber.org/zap v1.27.1
 	golang.org/x/net v0.47.0
 	golang.org/x/sync v0.18.0
-	k8s.io/api v0.34.2
-	k8s.io/apiextensions-apiserver v0.34.2
-	k8s.io/apimachinery v0.34.2
-	k8s.io/apiserver v0.34.2
-	k8s.io/client-go v0.34.2
-	k8s.io/code-generator v0.34.2
+	k8s.io/api v0.34.3
+	k8s.io/apiextensions-apiserver v0.34.3
+	k8s.io/apimachinery v0.34.3
+	k8s.io/apiserver v0.34.3
+	k8s.io/client-go v0.34.3
+	k8s.io/code-generator v0.34.3
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
 	knative.dev/hack v0.0.0-20251126013634-1484a9e9b641
 	knative.dev/hack/schema v0.0.0-20251126013634-1484a9e9b641
-	knative.dev/pkg v0.0.0-20251126013532-e853b1d1d6bb
-	knative.dev/reconciler-test v0.0.0-20251126022416-3fd6f8701d7b
+	knative.dev/pkg v0.0.0-20251216131424-9cc841088d57
+	knative.dev/reconciler-test v0.0.0-20251203013610-65cec2a81a56
 	sigs.k8s.io/randfill v1.0.0
 	sigs.k8s.io/yaml v1.6.0
 )
@@ -78,7 +78,7 @@ require (
 	github.com/gobuffalo/flect v1.0.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/gnostic-models v0.7.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -89,40 +89,40 @@ require (
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_golang v1.23.2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.66.1 // indirect
+	github.com/prometheus/common v0.67.4 // indirect
 	github.com/prometheus/otlptranslator v1.0.0 // indirect
-	github.com/prometheus/procfs v0.17.0 // indirect
+	github.com/prometheus/procfs v0.19.2 // indirect
 	github.com/rickb777/plural v1.2.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/spf13/cobra v1.9.1 // indirect
 	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
-	go.opentelemetry.io/otel/exporters/prometheus v0.60.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.7.1 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.61.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.9.0 // indirect
 	go.uber.org/automaxprocs v1.6.0 // indirect
-	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v2 v2.4.3 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/crypto v0.44.0 // indirect
 	golang.org/x/mod v0.30.0 // indirect
-	golang.org/x/oauth2 v0.30.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/oauth2 v0.32.0 // indirect
+	golang.org/x/sys v0.39.0 // indirect
 	golang.org/x/term v0.37.0 // indirect
 	golang.org/x/text v0.31.0 // indirect
 	golang.org/x/time v0.12.0 // indirect
 	golang.org/x/tools v0.39.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
-	google.golang.org/grpc v1.75.0 // indirect
-	google.golang.org/protobuf v1.36.8 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
+	google.golang.org/grpc v1.77.0 // indirect
+	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 94ce0afbf9a..691eef0ae79 100644
--- a/go.sum
+++ b/go.sum
@@ -274,8 +274,8 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
 github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
 github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -435,15 +435,15 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
 github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
-github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
 github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
 github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
-github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
+github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
 github.com/rickb777/date v1.13.0 h1:+8AmwLuY1d/rldzdqvqTEg7107bZ8clW37x4nsdG3Hs=
 github.com/rickb777/date v1.13.0/go.mod h1:GZf3LoGnxPWjX+/1TXOuzHefZFDovTyNLHDMd3qH70k=
 github.com/rickb777/plural v1.2.1 h1:UitRAgR70+yHFt26Tmj/F9dU9aV6UfjGXSbO1DcC9/U=
@@ -452,8 +452,8 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
 github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -518,39 +518,39 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0 h1:PeBoRj6af6xMI7qCupwFvTbbnd49V7n5YpG6pg8iDYQ=
-go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0/go.mod h1:ingqBCtMCe8I4vpz/UVzCW6sxoqgZB37nao91mLQ3Bw=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
-go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PDKVSLWP/Jocix3QWfXtbo=
-go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0 h1:/+/+UjlXjFcdDlXxKL1PouzX8Z2Vl0OxolRKeBEgYDw=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0/go.mod h1:Ldm/PDuzY2DP7IypudopCR3OCOW42NJlN9+mNEroevo=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0 h1:nKP4Z2ejtHn3yShBb+2KawiXgpn8In5cT7aO2wXuOTE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0/go.mod h1:NwjeBbNigsO4Aj9WgM0C+cKIrxsZUaRmZUO7A8I7u8o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU=
+go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0=
+go.opentelemetry.io/otel/exporters/prometheus v0.61.0/go.mod h1:iivMuj3xpR2DkUrUya3TPS/Z9h3dz7h01GxU+fQBRNg=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 h1:8UPA4IbVZxpsD76ihGOQiFml99GPAEZLohDXvqHdi6U=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0/go.mod h1:MZ1T/+51uIVKlRzGw1Fo46KEWThjlCBZKl2LzY5nv4g=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
+go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
+go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
+go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
+go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
@@ -567,8 +567,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
 go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
-go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
 go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
 go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -690,8 +690,8 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
-golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -780,8 +780,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -982,10 +982,10 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6
 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= -google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1013,8 +1013,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1029,8 +1029,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1065,18 +1065,18 @@ honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= -k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= -k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/code-generator v0.34.2 h1:9bG6jTxmsU3HXE5BNYJTC8AZ1D6hVVfkm8yYSkdkGY0= -k8s.io/code-generator v0.34.2/go.mod h1:dnDDEd6S/z4uZ+PG1aE58ySCi/lR4+qT3a4DddE4/2I= +k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= +k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= +k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= +k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= +k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= +k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.3 h1:uGH1qpDvSiYG4HVFqc6A3L4CKiX+aBWDrrsxHYK0Bdo= +k8s.io/apiserver v0.34.3/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w= +k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= +k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= +k8s.io/code-generator v0.34.3 h1:6ipJKsJZZ9q21BO8I2jEj4OLN3y8/1n4aihKN0xKmQk= +k8s.io/code-generator v0.34.3/go.mod h1:oW73UPYpGLsbRN8Ozkhd6ZzkF8hzFCiYmvEuWZDroI4= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 h1:wBIDZID8ju9pwOiLlV22YYKjFGtiNSWgHf5CnKLRUuM= k8s.io/gengo v0.0.0-20240404160639-a0386bf69313/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1096,10 +1096,10 @@ knative.dev/hack v0.0.0-20251126013634-1484a9e9b641 h1:N9Xqx3YLUNFN1WIc3UXTanK4j knative.dev/hack v0.0.0-20251126013634-1484a9e9b641/go.mod h1:L5RzHgbvam0u8QFHfzCX6MKxu/a/gIGEdaRBqNiVbl0= knative.dev/hack/schema v0.0.0-20251126013634-1484a9e9b641 h1:OiTXkLFGx9VxR4DDgKbcmcfJ7cN3aiqYDQ53IwZbAWo= knative.dev/hack/schema v0.0.0-20251126013634-1484a9e9b641/go.mod h1:KkibP1IazICP5ClxwN5D26LDSygsqbYnVGuGFTsHNOQ= -knative.dev/pkg v0.0.0-20251126013532-e853b1d1d6bb h1:JhpOj4b8wdTjbahkCyZUaFlkuHib/6uCVyjPI/W4MPg= -knative.dev/pkg v0.0.0-20251126013532-e853b1d1d6bb/go.mod h1:KFRQGFIcsdi9fDto0lGkcz/NwnNr/8hq9mmLyBuLkro= -knative.dev/reconciler-test v0.0.0-20251126022416-3fd6f8701d7b h1:gR6V7eQ9R1JMcf8N/812uZqtSQQHFqWRA/D7vkH0MyE= -knative.dev/reconciler-test v0.0.0-20251126022416-3fd6f8701d7b/go.mod h1:XiaOxTCk7xf54L6A5/oyBm7VOaGq1ouEpSTDxuME8Ag= +knative.dev/pkg v0.0.0-20251216131424-9cc841088d57 h1:JmPBxyxUhOuA5xzMGHoCawp/L9+xzP4IVeJWoV9jKg4= +knative.dev/pkg 
v0.0.0-20251216131424-9cc841088d57/go.mod h1:hTx1WGG6Qm832k2m7Ys0mZua8eds4XzY6fkDurNJTC0= +knative.dev/reconciler-test v0.0.0-20251203013610-65cec2a81a56 h1:cshdTYJyUKppLc+9MiYr37lXjfoGgf70JdU6LIlm2yc= +knative.dev/reconciler-test v0.0.0-20251203013610-65cec2a81a56/go.mod h1:XiaOxTCk7xf54L6A5/oyBm7VOaGq1ouEpSTDxuME8Ag= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 2f2b342431d..00b2228a1de 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -201,13 +201,13 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM if timeout != 0 { ctx, _ = context.WithTimeout(ctx, timeout) } - if len(pairs) == 0 { - return ctx, nil, nil - } md := metadata.Pairs(pairs...) for _, mda := range mux.metadataAnnotators { md = metadata.Join(md, mda(ctx, req)) } + if len(md) == 0 { + return ctx, nil, nil + } return ctx, md, nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 8376d1e0efd..3d07063007d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -66,7 +66,7 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { var ( // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() + protoMessageType = reflect.TypeFor[proto.Message]() ) // marshalNonProto marshals a non-message field of a protobuf message. @@ -325,9 +325,9 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } -var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() +var typeProtoEnum = reflect.TypeFor[protoEnum]() -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() +var typeProtoMessage = reflect.TypeFor[proto.Message]() // Delimiter for newline encoded JSON streams. 
func (j *JSONPb) Delimiter() []byte { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 19255ec441e..3eb16167173 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health/grpc_health_v1" @@ -281,12 +282,19 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, ) { _, outboundMarshaler := MarshalerForRequest(s, r) + annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath)) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } - resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + var md ServerMetadata + resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{ Service: r.URL.Query().Get("service"), - }) + }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD)) + annotatedContext = NewServerMetadataContext(annotatedContext, md) if err != nil { - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } @@ -300,7 +308,7 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin err = status.Error(codes.NotFound, resp.String()) } - s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err) return } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7b762370e27..8f8dc65d38e 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -220,7 +220,7 @@ func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) return extractSummary(o, f), nil case dto.MetricType_UNTYPED: return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: return extractHistogram(o, f), nil } return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) @@ -403,9 +403,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { infSeen = true } + v := q.GetCumulativeCountFloat() + if v <= 0 { + v = float64(q.GetCumulativeCount()) + } samples = append(samples, &model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), + Value: model.SampleValue(v), Timestamp: timestamp, }) } @@ -428,9 +432,13 @@ func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { } lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + v := m.Histogram.GetSampleCountFloat() + if v <= 0 { + v = float64(m.Histogram.GetSampleCount()) + } count := &model.Sample{ Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), + Value: model.SampleValue(v), Timestamp: timestamp, } samples = append(samples, count) diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go 
b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8dbf6d04ed6..21b93bca362 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -160,38 +160,38 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, true) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } switch metricType { case dto.MetricType_COUNTER: @@ -208,39 +208,41 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E n, err = w.WriteString(" unknown\n") case dto.MetricType_HISTOGRAM: n, err = w.WriteString(" histogram\n") + case dto.MetricType_GAUGE_HISTOGRAM: + n, err = w.WriteString(" gaugehistogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } if toOM.withUnit && in.Unit != nil { n, err = w.WriteString("# UNIT ") written += n if err != nil { - return + return written, err } n, err = writeName(w, compliantName) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Unit, true) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } @@ -304,7 +306,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -314,7 +316,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -325,7 +327,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) n += createdTsBytesWritten } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", compliantName, metric, @@ -333,6 +335,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } infSeen := false for _, b := range metric.Histogram.Bucket { + if b.GetCumulativeCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) + } n, err = writeOpenMetricsSample( w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), @@ -341,7 +349,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err } if 
math.IsInf(b.GetUpperBound(), +1) { infSeen = true @@ -354,9 +362,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E 0, metric.Histogram.GetSampleCount(), true, nil, ) + // We do not check for a float sample count here + // because we will check for it below (and error + // out if needed). written += n if err != nil { - return + return written, err } } n, err = writeOpenMetricsSample( @@ -366,7 +377,13 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E ) written += n if err != nil { - return + return written, err + } + if metric.Histogram.GetSampleCountFloat() > 0 { + return written, fmt.Errorf( + "OpenMetrics v1.0 does not support float histogram %s %s", + compliantName, metric, + ) } n, err = writeOpenMetricsSample( w, compliantName, "_count", metric, "", 0, @@ -384,10 +401,10 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E } written += n if err != nil { - return + return written, err } } - return + return written, err } // FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index c4e9c1bbc3a..6b897814564 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -108,38 +108,38 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString("# HELP ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += n if err != nil { - return + return written, err } err = w.WriteByte(' ') written++ if err != nil { - return + return written, err } n, err = writeEscapedString(w, *in.Help, false) written += n if err != nil { - return + return written, err } err = w.WriteByte('\n') written++ if err != nil { - return + return written, err } } n, err = w.WriteString("# TYPE ") written += n if err != nil { - return + return written, err } n, err = writeName(w, name) written += n if err != nil { - return + return written, err } metricType := in.GetType() switch metricType { @@ -151,14 +151,17 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e n, err = w.WriteString(" summary\n") case dto.MetricType_UNTYPED: n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + // The classic Prometheus text format has no notion of a gauge + // histogram. We render a gauge histogram in the same way as a + // regular histogram. n, err = w.WriteString(" histogram\n") default: return written, fmt.Errorf("unknown metric type %s", metricType.String()) } written += n if err != nil { - return + return written, err } // Finally the samples, one line for each. 
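The repeated `- return` / `+ return written, err` changes in MetricFamilyToOpenMetrics and MetricFamilyToText are behavior-preserving: both functions declare named result parameters, so a bare `return` already returned the current values of `written` and `err`. A minimal, self-contained sketch (not taken from this patch; names are illustrative) showing the equivalence:

package main

import (
	"errors"
	"fmt"
)

// nakedReturn relies on named results: a bare return implicitly
// returns the current values of written and err.
func nakedReturn() (written int, err error) {
	written = 42
	err = errors.New("boom")
	return // implicitly: return written, err
}

// explicitReturn returns the same values, but makes them visible
// at the return site.
func explicitReturn() (written int, err error) {
	written = 42
	err = errors.New("boom")
	return written, err
}

func main() {
	n1, e1 := nakedReturn()
	n2, e2 := explicitReturn()
	fmt.Println(n1, e1, n2, e2) // 42 boom 42 boom
}

The rewrite is a readability choice, likely lint-driven (linters such as gocritic flag bare returns in long functions); nothing changes at run time.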
@@ -208,7 +211,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -217,13 +220,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } n, err = writeSample( w, name, "_count", metric, "", 0, float64(metric.Summary.GetSampleCount()), ) - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( "expected histogram in metric %s %s", name, metric, @@ -231,28 +234,36 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } infSeen := false for _, b := range metric.Histogram.Bucket { + v := b.GetCumulativeCountFloat() + if v == 0 { + v = float64(b.GetCumulativeCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), + v, ) written += n if err != nil { - return + return written, err } if math.IsInf(b.GetUpperBound(), +1) { infSeen = true } } if !infSeen { + v := metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } n, err = writeSample( w, name, "_bucket", metric, model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), + v, ) written += n if err != nil { - return + return written, err } } n, err = writeSample( @@ -261,12 +272,13 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e ) written += n if err != nil { - return + return written, err } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) + v := metric.Histogram.GetSampleCountFloat() + if v == 0 { + v = float64(metric.Histogram.GetSampleCount()) + } + n, err = writeSample(w, name, "_count", metric, "", 0, v) default: return written, fmt.Errorf( "unexpected type in metric %s %s", name, metric, @@ -274,10 +286,10 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } written += n if err != nil { - return + return written, err } } - return + return written, err } // writeSample writes a single sample in text format to w, given the metric diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 8f2edde3244..00c8841a108 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -48,8 +48,10 @@ func (e ParseError) Error() string { return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) } -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. +// TextParser is used to parse the simple and flat text-based exchange format. +// +// TextParser instances must be created with NewTextParser, the zero value of +// TextParser is invalid. type TextParser struct { metricFamiliesByName map[string]*dto.MetricFamily buf *bufio.Reader // Where the parsed input is read through. 
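The text_create.go hunks above, like decode.go earlier in this patch, now prefer the float variants of histogram counts (`GetSampleCountFloat`, `GetCumulativeCountFloat`) and fall back to the classic integer fields when the float value reads as zero (decode.go guards with `v <= 0`, text_create.go with `v == 0`; an unset optional float reads as 0 either way). A self-contained sketch of that fallback, with plain fields standing in for the generated dto getters:

package main

import "fmt"

// bucket mimics the relevant dto.Bucket accessors: an optional float
// count (reads as 0 when unset) alongside the classic uint64 count.
type bucket struct {
	cumulativeCount      uint64
	cumulativeCountFloat float64
}

// countValue prefers the float count when it is set, mirroring the
// fallback added in text_create.go.
func countValue(b bucket) float64 {
	if v := b.cumulativeCountFloat; v != 0 {
		return v
	}
	return float64(b.cumulativeCount)
}

func main() {
	fmt.Println(countValue(bucket{cumulativeCount: 10}))        // 10
	fmt.Println(countValue(bucket{cumulativeCountFloat: 10.5})) // 10.5
}

Note that a float count that is genuinely 0 appears indistinguishable from an unset one here, which is part of why the parser side (text_parse.go, next) normalizes whole histograms rather than individual values.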
@@ -129,9 +131,44 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
 	if p.err != nil && errors.Is(p.err, io.EOF) {
 		p.parseError("unexpected end of input stream")
 	}
+	for _, histogramMetric := range p.histograms {
+		normalizeHistogram(histogramMetric.GetHistogram())
+	}
 	return p.metricFamiliesByName, p.err
 }
 
+// normalizeHistogram makes sure that all the buckets and the count in each
+// histogram are either completely float or completely integer.
+func normalizeHistogram(histogram *dto.Histogram) {
+	if histogram == nil {
+		return
+	}
+	anyFloats := false
+	if histogram.GetSampleCountFloat() != 0 {
+		anyFloats = true
+	} else {
+		for _, b := range histogram.GetBucket() {
+			if b.GetCumulativeCountFloat() != 0 {
+				anyFloats = true
+				break
+			}
+		}
+	}
+	if !anyFloats {
+		return
+	}
+	if histogram.GetSampleCountFloat() == 0 {
+		histogram.SampleCountFloat = proto.Float64(float64(histogram.GetSampleCount()))
+		histogram.SampleCount = nil
+	}
+	for _, b := range histogram.GetBucket() {
+		if b.GetCumulativeCountFloat() == 0 {
+			b.CumulativeCountFloat = proto.Float64(float64(b.GetCumulativeCount()))
+			b.CumulativeCount = nil
+		}
+	}
+}
+
 func (p *TextParser) reset(in io.Reader) {
 	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
 	p.currentLabelPairs = nil
@@ -281,7 +318,9 @@ func (p *TextParser) readingLabels() stateFn {
 	// Summaries/histograms are special. We have to reset the
 	// currentLabels map, currentQuantile and currentBucket before starting to
 	// read labels.
-	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY ||
+		p.currentMF.GetType() == dto.MetricType_HISTOGRAM ||
+		p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
 		p.currentLabels = map[string]string{}
 		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
 		p.currentQuantile = math.NaN()
@@ -374,7 +413,9 @@ func (p *TextParser) startLabelName() stateFn {
 	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
 	// labels to 'real' labels.
 	if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
-		(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
+		((p.currentMF.GetType() != dto.MetricType_HISTOGRAM &&
+			p.currentMF.GetType() != dto.MetricType_GAUGE_HISTOGRAM) ||
+			p.currentLabelPair.GetName() != model.BucketLabel) {
 		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
 	}
 	// Check for duplicate label names.
@@ -425,7 +466,7 @@ func (p *TextParser) startLabelValue() stateFn {
 		}
 	}
 	// Similar special treatment of histograms.
-	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM {
 		if p.currentLabelPair.GetName() == model.BucketLabel {
 			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 				// Create a more helpful error message.
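normalizeHistogram runs once per parsed histogram and promotes every count to the float representation as soon as any single count was parsed as a float, so consumers never see a mixed histogram. A self-contained sketch of the same promotion logic, using plain pointer fields in place of the generated dto/proto types (the vendored code checks getter values, where an unset field reads as 0; this sketch uses nil-ness instead):

package main

import "fmt"

type bucket struct {
	cumulativeCount      *uint64
	cumulativeCountFloat *float64
}

type hist struct {
	sampleCount      *uint64
	sampleCountFloat *float64
	buckets          []*bucket
}

func u64(v uint64) *uint64   { return &v }
func f64(v float64) *float64 { return &v }

// normalize promotes every count to float if any count is float,
// mirroring normalizeHistogram in text_parse.go.
func normalize(h *hist) {
	anyFloats := h.sampleCountFloat != nil
	for _, b := range h.buckets {
		if b.cumulativeCountFloat != nil {
			anyFloats = true
		}
	}
	if !anyFloats {
		return
	}
	if h.sampleCountFloat == nil {
		h.sampleCountFloat = f64(float64(*h.sampleCount))
		h.sampleCount = nil
	}
	for _, b := range h.buckets {
		if b.cumulativeCountFloat == nil {
			b.cumulativeCountFloat = f64(float64(*b.cumulativeCount))
			b.cumulativeCount = nil
		}
	}
}

func main() {
	h := &hist{
		sampleCount: u64(12),
		buckets: []*bucket{
			{cumulativeCount: u64(5)},
			{cumulativeCountFloat: f64(7.5)}, // one float bucket promotes the whole histogram
		},
	}
	normalize(h)
	fmt.Println(*h.sampleCountFloat, *h.buckets[0].cumulativeCountFloat) // 12 5
}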
@@ -476,7 +517,7 @@ func (p *TextParser) readingValue() stateFn { p.summaries[signature] = p.currentMetric p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: signature := model.LabelsToSignature(p.currentLabels) if histogram := p.histograms[signature]; histogram != nil { p.currentMetric = histogram @@ -522,24 +563,38 @@ func (p *TextParser) readingValue() stateFn { }, ) } - case dto.MetricType_HISTOGRAM: + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: // *sigh* if p.currentMetric.Histogram == nil { p.currentMetric.Histogram = &dto.Histogram{} } switch { case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + if uintValue := uint64(value); value == float64(uintValue) { + p.currentMetric.Histogram.SampleCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative count for histogram %q", p.currentMF.GetName())) + return nil + } + p.currentMetric.Histogram.SampleCountFloat = proto.Float64(value) + } case p.currentIsHistogramSum: p.currentMetric.Histogram.SampleSum = proto.Float64(value) case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) + b := &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + } + if uintValue := uint64(value); value == float64(uintValue) { + b.CumulativeCount = proto.Uint64(uintValue) + } else { + if value < 0 { + p.parseError(fmt.Sprintf("negative bucket population for histogram %q", p.currentMF.GetName())) + return nil + } + b.CumulativeCountFloat = proto.Float64(value) + } + p.currentMetric.Histogram.Bucket = append(p.currentMetric.Histogram.Bucket, b) } default: p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) @@ -602,10 +657,18 @@ func (p *TextParser) readingType() stateFn { if p.readTokenUntilNewline(false); p.err != nil { return nil // Unexpected end of input. } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] + typ := strings.ToUpper(p.currentToken.String()) // Tolerate any combination of upper and lower case. + metricType, ok := dto.MetricType_value[typ] // Tolerate "gauge_histogram" (not originally part of the text format). if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil + // We also want to tolerate "gaugehistogram" to mark a gauge + // histogram, because that string is used in OpenMetrics. Note, + // however, that gauge histograms do not officially exist in the + // classic text format. 
+ if typ != "GAUGEHISTOGRAM" { + p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) + return nil + } + metricType = int32(dto.MetricType_GAUGE_HISTOGRAM) } p.currentMF.Type = dto.MetricType(metricType).Enum() return p.startOfLine @@ -855,7 +918,8 @@ func (p *TextParser) setOrCreateCurrentMF() { } histogramName := histogramMetricName(name) if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM || + p.currentMF.GetType() == dto.MetricType_GAUGE_HISTOGRAM { if isCount(name) { p.currentIsHistogramCount = true } diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 3c3bf910fdf..23ecd4505b2 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,7 +1,9 @@ version: "2" linters: enable: + - errorlint - forbidigo + - gocritic - godot - misspell - revive @@ -11,6 +13,20 @@ linters: forbid: - pattern: ^fmt\.Print.*$ msg: Do not commit print statements. + gocritic: + enable-all: true + disabled-checks: + - commentFormatting + - commentedOutCode + - deferInLoop + - filepathJoin + - hugeParam + - importShadow + - paramTypeCombine + - rangeValCopy + - tooManyResultsChecker + - unnamedResult + - whyNoLint godot: exclude: # Ignore "See: URL". @@ -19,16 +35,12 @@ linters: misspell: locale: US exclusions: - generated: lax presets: - comments - common-false-positives - legacy - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ + warn-unused: true formatters: enable: - gofmt @@ -37,9 +49,3 @@ formatters: goimports: local-prefixes: - github.com/prometheus/procfs - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 7edfe4d0932..bce50a19c50 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 The Prometheus Authors +# Copyright The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 4de21512ffb..6f61bec48fd 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -139,7 +139,7 @@ common-deps: update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ + $(GO) get $$m; \ done $(GO) mod tidy diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 2e53344151f..716bdef1090 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -73,15 +73,16 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { columns := strings.Fields(line) width := len(columns) - if width == expectedHeaderWidth || width == 0 { + switch width { + case expectedHeaderWidth, 0: continue - } else if width == expectedDataWidth { + case expectedDataWidth: entry, err := parseARPEntry(columns) if err != nil { return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) - } else { + default: return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 83807500908..53243e68758 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -64,14 +64,12 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { if bucketCount == -1 { bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) - } + } else if bucketCount != arraySize { + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { + for i := range arraySize { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go index bf4f3b48c0e..4f1cac1f0ac 100644 --- a/vendor/github.com/prometheus/procfs/cmdline.go +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f0950bb4953..5fe6cecd3dd 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 64cfd534c1f..8f155551e52 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index d88442f0edf..e81a5db9490 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index c11207f3ab6..4be2b1cc549 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index a6b2b3127cb..e713bae8dfe 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 003bc2ad4a3..0825aa1a830 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index 1c9b7313b6c..496770b05f6 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index fa3686bc004..b3228ce3d86 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index a0ef55562eb..575eb022eb0 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5f2a37a78b3..e4a5876eafb 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index f9d961e4417..26bfea071ba 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 9bdaccc7c8a..8f27912a13a 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 1b5bdbdf84a..3c53023c544 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 80df79c3193..80fce484789 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index 7db86330779..9dde8570737 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -388,20 +388,21 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { } } case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { + switch strings.Split(fields[1], "=")[0] { + case "alo": err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) if err != nil { return &m, err } - } else if strings.Split(fields[1], "=")[0] == "inv" { + case "inv": err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, &m.CacheopSyncCacheInProgress) if err != nil { return &m, err } - } else { + default: err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3a43e83915f..e7ccad66b2e 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 5a7d2df06ae..30c5872019a 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 71b7a70ebd6..0e41f71af18 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index d5404a6d728..8318d8dfd51 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
index 1d86f5e63f3..15bb096ee20 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
index fe2355d3c6f..e0ed671ea07 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index bc3a20c932d..5374da9fa89 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/kernel_hung.go b/vendor/github.com/prometheus/procfs/kernel_hung.go
new file mode 100644
index 00000000000..539c1115142
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_hung.go
@@ -0,0 +1,45 @@
+// Copyright The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+// KernelHung contains information about the kernel's hung_task_detect_count number.
+type KernelHung struct {
+	// Indicates the total number of tasks that have been detected as hung since the system boot.
+	// This file shows up if `CONFIG_DETECT_HUNG_TASK` is enabled.
+	HungTaskDetectCount *uint64
+}
+
+// KernelHung returns values from /proc/sys/kernel/hung_task_detect_count.
+func (fs FS) KernelHung() (KernelHung, error) {
+	data, err := os.ReadFile(fs.proc.Path("sys", "kernel", "hung_task_detect_count"))
+	if err != nil {
+		return KernelHung{}, err
+	}
+	val, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+	if err != nil {
+		return KernelHung{}, err
+	}
+	return KernelHung{
+		HungTaskDetectCount: &val,
+	}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
index db88566bdf0..b66565a1043 100644
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -1,4 +1,4 @@
-// Copyright 2020 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 332e76c17f5..c8c78a65edc 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 1fd4381b221..d66eeda82a2 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The Prometheus Authors
+// Copyright The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -27,13 +27,34 @@ var (
 	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
 	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
 	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
-	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
+	componentDeviceRE    = regexp.MustCompile(`(.*)\[(\d+)\](\([SF]+\))?`)
+	personalitiesPrefix  = "Personalities : "
 )
 
+type MDStatComponent struct {
+	// Name of the component device.
+	Name string
+	// DescriptorIndex is the descriptor number of the component device, e.g. its order in the superblock.
+	DescriptorIndex int32
+	// Flags per Linux drivers/md/md.[ch] as of v6.12-rc1,
+	// restricted to the subset that is exposed in mdstat.
+	WriteMostly bool
+	Journal     bool
+	Faulty      bool // "Faulty" is what kernel source uses for "(F)"
+	Spare       bool
+	Replacement bool
+	// Some additional flags that are NOT exposed in procfs today; they may
+	// be available via sysfs:
+	// In_sync, Bitmap_sync, Blocked, WriteErrorSeen, FaultRecorded,
+	// BlockedBadBlocks, WantReplacement, Candidate, ...
+}
+
 // MDStat holds info parsed from /proc/mdstat.
 type MDStat struct {
 	// Name of the device.
 	Name string
+	// raid type of the device.
+	Type string
 	// activity-state of the device.
 	ActivityState string
 	// Number of active disks.
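Returning to the new kernel_hung.go file above: the reader follows the usual procfs pattern of an accessor method on FS. A minimal usage sketch (assuming the package's existing procfs.NewFS constructor and a kernel built with CONFIG_DETECT_HUNG_TASK, so the file exists):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	kh, err := fs.KernelHung() // reads /proc/sys/kernel/hung_task_detect_count
	if err != nil {
		log.Fatal(err) // e.g. the file is absent when CONFIG_DETECT_HUNG_TASK is off
	}
	if kh.HungTaskDetectCount != nil {
		fmt.Println("hung tasks detected since boot:", *kh.HungTaskDetectCount)
	}
}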
@@ -58,8 +79,8 @@ type MDStat struct { BlocksSyncedFinishTime float64 // current sync speed (in Kilobytes/sec) BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string + // component devices + Devices []MDStatComponent } // MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of @@ -80,28 +101,52 @@ func (fs FS) MDStat() ([]MDStat, error) { // parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of // structs containing the relevant info. func parseMDStat(mdStatData []byte) ([]MDStat, error) { + // TODO: + // - parse global hotspares from the "unused devices" line. mdStats := []MDStat{} lines := strings.Split(string(mdStatData), "\n") + knownRaidTypes := make(map[string]bool) for i, line := range lines { if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { continue } + // Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] + if len(knownRaidTypes) == 0 && strings.HasPrefix(line, personalitiesPrefix) { + personalities := strings.Fields(line[len(personalitiesPrefix):]) + for _, word := range personalities { + word := word[1 : len(word)-1] + knownRaidTypes[word] = true + } + continue + } deviceFields := strings.Fields(line) if len(deviceFields) < 3 { return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive + state := deviceFields[2] // active, inactive, broken + + mdType := "unknown" // raid1, raid5, etc. + var deviceStartIndex int + if len(deviceFields) > 3 { // mdType may be in the 3rd or 4th field + if isRaidType(deviceFields[3], knownRaidTypes) { + mdType = deviceFields[3] + deviceStartIndex = 4 + } else if len(deviceFields) > 4 && isRaidType(deviceFields[4], knownRaidTypes) { + // if the 3rd field is (...), the 4th field is the mdType + mdType = deviceFields[4] + deviceStartIndex = 5 + } + } if len(lines) <= i+3 { return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } - // Failed disks have the suffix (F) & Spare disks have the suffix (S). + // Failed (Faulty) disks have the suffix (F) & Spare disks have the suffix (S). fail := int64(strings.Count(line, "(F)")) spare := int64(strings.Count(line, "(S)")) active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) @@ -129,13 +174,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Append recovery and resyncing state info. 
if recovering || resyncing || checking || reshaping { - if recovering { + switch { + case recovering: state = "recovering" - } else if reshaping { + case reshaping: state = "reshaping" - } else if checking { + case checking: state = "checking" - } else { + default: state = "resyncing" } @@ -151,8 +197,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } } + devices, err := evalComponentDevices(deviceFields[deviceStartIndex:]) + if err != nil { + return nil, fmt.Errorf("error parsing components in md device %q: %w", mdName, err) + } + mdStats = append(mdStats, MDStat{ Name: mdName, + Type: mdType, ActivityState: state, DisksActive: active, DisksFailed: fail, @@ -165,14 +217,24 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), + Devices: devices, }) } return mdStats, nil } +// check if a string's format is like the mdType +// Rule 1: mdType should not be like (...) +// Rule 2: mdType should not be like sda[0] +// . +func isRaidType(mdType string, knownRaidTypes map[string]bool) bool { + _, ok := knownRaidTypes[mdType] + return !strings.ContainsAny(mdType, "([") && ok +} + func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + // e.g. 523968 blocks super 1.2 [4/4] [UUUU] statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) @@ -263,17 +325,29 @@ func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) +func evalComponentDevices(deviceFields []string) ([]MDStatComponent, error) { + mdComponentDevices := make([]MDStatComponent, 0) + for _, field := range deviceFields { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + descriptorIndex, err := strconv.ParseInt(match[2], 10, 32) + if err != nil { + return mdComponentDevices, fmt.Errorf("error parsing int from device %q: %w", match[2], err) } + mdComponentDevices = append(mdComponentDevices, MDStatComponent{ + Name: match[1], + DescriptorIndex: int32(descriptorIndex), + // match may contain one or more of these + // https://github.com/torvalds/linux/blob/7ec462100ef9142344ddbf86f2c3008b97acddbe/drivers/md/md.c#L8376-L8392 + Faulty: strings.Contains(match[3], "(F)"), + Spare: strings.Contains(match[3], "(S)"), + Journal: strings.Contains(match[3], "(J)"), + Replacement: strings.Contains(match[3], "(R)"), + WriteMostly: strings.Contains(match[3], "(W)"), + }) } - return mdComponentDevices + return mdComponentDevices, nil } diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 937e1f9606f..34203831871 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
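For reference, a minimal sketch of how the new procfs additions above might be consumed: FS.KernelHung and the extended MDStat with its raid Type and structured component Devices. It assumes a standard /proc mount; the program shape and error handling are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// New in this bump: /proc/sys/kernel/hung_task_detect_count.
	// The file only exists when CONFIG_DETECT_HUNG_TASK is enabled.
	if kh, err := fs.KernelHung(); err == nil && kh.HungTaskDetectCount != nil {
		fmt.Println("hung tasks since boot:", *kh.HungTaskDetectCount)
	}

	// MDStat now carries the raid type and per-component device info
	// instead of a bare []string of device names.
	mds, err := fs.MDStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mds {
		fmt.Printf("%s type=%s state=%s\n", md.Name, md.Type, md.ActivityState)
		for _, dev := range md.Devices {
			fmt.Printf("  %s[%d] faulty=%v spare=%v\n",
				dev.Name, dev.DescriptorIndex, dev.Faulty, dev.Spare)
		}
	}
}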
// You may obtain a copy of the License at @@ -307,7 +307,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { m.ZswapBytes = &valBytes case "Zswapped:": m.Zswapped = &val - m.ZswapBytes = &valBytes + m.ZswappedBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index a704c5e735f..9414a12f42f 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -147,8 +147,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { // mountOptionsParser parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { + for opt := range strings.SplitSeq(mountOptions, ",") { splitOption := strings.Split(opt, "=") if len(splitOption) < 2 { key := splitOption[0] @@ -178,3 +177,21 @@ func GetProcMounts(pid int) ([]*MountInfo, error) { } return parseMountInfo(data) } + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func (fs FS) GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("self/mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`. +func (fs FS) GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%d/mountinfo", pid))) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 50caa73274e..e503cb3a6c5 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -383,7 +383,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if stats.Opts == nil { stats.Opts = map[string]string{} } - for _, opt := range strings.Split(ss[1], ",") { + for opt := range strings.SplitSeq(ss[1], ",") { split := strings.Split(opt, "=") if len(split) == 2 { stats.Opts[split[0]] = split[1] diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 316df5fbb74..e9ca3570790 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
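A quick sketch of the new FS-scoped mount accessors added above, which mirror the existing package-level GetMounts/GetProcMounts but resolve paths against the FS's own mount point (useful for test fixtures or a bind-mounted host /proc); the path and imports here are illustrative, reusing those from the previous sketch:

fs, err := procfs.NewFS("/host/proc") // e.g. host /proc mounted into a container
if err != nil {
	log.Fatal(err)
}
mounts, err := fs.GetMounts() // reads /host/proc/self/mountinfo
if err != nil {
	log.Fatal(err)
}
for _, m := range mounts {
	fmt.Println(m.MountPoint, m.FSType)
}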
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index e66208aa05f..7b3e1d61c95 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go index f50b38e3528..2a0f60f29fe 100644 --- a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "path/filepath" "strconv" "strings" ) @@ -56,7 +57,9 @@ func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { } for _, iFaceFile := range ifaceFiles { - f, err := os.Open(dir + "/" + iFaceFile.Name()) + filePath := filepath.Join(dir, iFaceFile.Name()) + + f, err := os.Open(filePath) if err != nil { return netDevSNMP6, err } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 19e3378f72d..9291f8cd4c8 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8d4b1ac05b0..eaa996cbcf1 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -169,7 +169,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro &pc.EnterMemoryPressure, } - for i := 0; i < len(capabilities); i++ { + for i := range capabilities { switch capabilities[i] { case "y": *capabilityFields[i] = true diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go index deb7029fe1e..fa3812d9d00 100644 --- a/vendor/github.com/prometheus/procfs/net_route.go +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index fae62b13d96..8b221ebfff7 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -139,9 +139,6 @@ func parseSockstatKVs(kvs []string) (map[string]int, error) { func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { var nsp NetSockstatProtocol for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v switch k { case "inuse": nsp.InUse = v diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 71c8059f4d7..4a2dfa18fd8 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 0396d72015c..610ea78e564 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go index 13994c1782f..b1b3f6a6a2f 100644 --- a/vendor/github.com/prometheus/procfs/net_tls_stat.go +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -1,4 +1,4 @@ -// Copyright 2023 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go index 9ac3daf2d4c..8a327791026 100644 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d7e0cacb4c6..e4d63592365 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7c597bc8708..69d07944516 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index 932ef204684..5a9f497d190 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -1,4 +1,4 @@ -// Copyright 2017 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 742dff453ba..dbdae473924 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/nfnetlink_queue.go b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go new file mode 100644 index 00000000000..b0a73b11e9e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go @@ -0,0 +1,85 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +const nfNetLinkQueueFormat = "%d %d %d %d %d %d %d %d %d" + +// NFNetLinkQueue contains general information about netfilter queues found in /proc/net/netfilter/nfnetlink_queue. 
+type NFNetLinkQueue struct { + // ID of the queue + QueueID uint + // PID of the process handling the queue + PeerPID uint + // number of packets waiting for a decision + QueueTotal uint + // indicates how userspace receives packets + CopyMode uint + // size of copy + CopyRange uint + // number of items dropped by the kernel because too many packets were waiting for a decision. + // If queue_total exceeds queue_max_len (1024 by default), packets are dropped. + QueueDropped uint + // number of packets dropped by userspace (due to kernel send failure on the netlink socket) + QueueUserDropped uint + // sequence number of packets queued. It gives a close approximation of the number of queued packets. + SequenceID uint + // internal value (number of entities using the queue) + Use uint +} + +// NFNetLinkQueue returns information about current state of netfilter queues. +func (fs FS) NFNetLinkQueue() ([]NFNetLinkQueue, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/netfilter/nfnetlink_queue")) + if err != nil { + return nil, err + } + + queue := []NFNetLinkQueue{} + if len(data) == 0 { + return queue, nil + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + nFNetLinkQueue, err := parseNFNetLinkQueueLine(line) + if err != nil { + return nil, err + } + queue = append(queue, *nFNetLinkQueue) + } + return queue, nil +} + +// parseNFNetLinkQueueLine parses each line of the /proc/net/netfilter/nfnetlink_queue file. +func parseNFNetLinkQueueLine(line string) (*NFNetLinkQueue, error) { + nFNetLinkQueue := NFNetLinkQueue{} + _, err := fmt.Sscanf( + line, nfNetLinkQueueFormat, + &nFNetLinkQueue.QueueID, &nFNetLinkQueue.PeerPID, &nFNetLinkQueue.QueueTotal, &nFNetLinkQueue.CopyMode, + &nFNetLinkQueue.CopyRange, &nFNetLinkQueue.QueueDropped, &nFNetLinkQueue.QueueUserDropped, &nFNetLinkQueue.SequenceID, &nFNetLinkQueue.Use, + ) + if err != nil { + return nil, err + } + return &nFNetLinkQueue, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 368187fa884..39c14aa55eb 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -49,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil || errors.Unwrap(err) == ErrMountPoint { + if err != nil || errors.Is(err, ErrMountPoint) { return Proc{}, err } return fs.Self() diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index 4a64347c03a..535c08d6fc0 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
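As a usage sketch for the new NFNetLinkQueue accessor above (output depends on whether any NFQUEUE rules are active; an empty file yields an empty slice, and imports follow the earlier sketches):

fs, err := procfs.NewFS("/proc")
if err != nil {
	log.Fatal(err)
}
queues, err := fs.NFNetLinkQueue()
if err != nil {
	log.Fatal(err) // file may be absent without nfnetlink_queue support
}
for _, q := range queues {
	fmt.Printf("queue=%d pid=%d waiting=%d dropped=%d\n",
		q.QueueID, q.PeerPID, q.QueueTotal, q.QueueDropped)
}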
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 5dd4938999a..0b275c3b1f5 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -40,13 +40,13 @@ type CgroupSummary struct { // parseCgroupSummary parses each line of the /proc/cgroup file // Line format is `subsys_name hierarchy num_cgroups enabled`. -func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { +func parseCgroupSummaryString(cgroupSummaryStr string) (*CgroupSummary, error) { var err error - fields := strings.Fields(CgroupSummaryStr) + fields := strings.Fields(cgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), cgroupSummaryStr) } CgroupSummary := &CgroupSummary{ diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 57a89895d66..5b941de0477 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index fa761b35295..fa57761dbe3 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -60,15 +60,16 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { text = scanner.Text() - if rPos.MatchString(text) { + switch { + case rPos.MatchString(text): pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { + case rFlags.MatchString(text): flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { + case rMntID.MatchString(text): mntid = rMntID.FindStringSubmatch(text)[1] - } else if rIno.MatchString(text) { + case rIno.MatchString(text): ino = rIno.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { + case rInotify.MatchString(text): newInotify, err := parseInotifyInfo(text) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 86b4b452463..b942c50723c 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index d15b66ddb64..dd8086ba2e7 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 9530b14bc68..4b7d3378471 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -19,6 +19,7 @@ import ( "os" "regexp" "strconv" + "strings" ) // ProcLimits represents the soft limits for each of the process's resource @@ -74,7 +75,7 @@ const ( ) var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) + limitsMatch = regexp.MustCompile(`(Max \w+\s??\w*\s?\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. 
@@ -106,7 +107,7 @@ func (p Proc) Limits() (ProcLimits, error) { return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } - switch fields[1] { + switch strings.TrimSpace(fields[1]) { case "Max cpu time": l.CPUTime, err = parseUint(fields[2]) case "Max file size": diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 7e75c286b5b..cc519f92f9a 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 4248c1716ee..7f94cc89145 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 0f8f847f954..5fc0eb9e2f9 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index ccd35f153a0..cc2c5de873e 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 9a297afcf89..3e48afd1d39 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 4bdc90b07ea..8d9a9bcd67a 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index fb7fd3995be..841fef46492 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3328556bdc4..02e3f9e316a 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go index ed579842437..b0a93601677 100644 --- a/vendor/github.com/prometheus/procfs/proc_statm.go +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -1,4 +1,4 @@ -// Copyright 2025 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -80,7 +80,7 @@ func (p Proc) Statm() (ProcStatm, error) { func parseStatm(data []byte) ([]uint64, error) { var statmSlice []uint64 statmItems := strings.Fields(string(data)) - for i := 0; i < len(statmItems); i++ { + for i := range statmItems { statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index dd8aa56885e..1ed2bced417 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at @@ -16,7 +16,7 @@ package procfs import ( "bytes" "math/bits" - "sort" + "slices" "strconv" "strings" @@ -94,8 +94,7 @@ func (p Proc) NewStatus() (ProcStatus, error) { s := ProcStatus{PID: p.PID} - lines := strings.Split(string(data), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(data), "\n") { if !bytes.Contains([]byte(line), []byte(":")) { continue } @@ -222,7 +221,7 @@ func calcCpusAllowedList(cpuString string) []uint64 { } - sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + slices.Sort(g) return g } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 3810d1ac999..52658a4d52d 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 5f7f32dc83c..fafd8dff740 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 8611c901770..32a04678ad0 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 403e6ae7086..47b73a7297b 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index e36b41c18a9..593ad0f62f0 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
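Several hunks in this bump (mountinfo, mountstats, proc_status, zoneinfo) swap strings.Split loops for the Go 1.24 iterator form strings.SplitSeq, which yields substrings lazily instead of allocating an intermediate slice, and replace sort.Slice with the generic slices.Sort. A minimal standalone illustration of both idioms:

import (
	"fmt"
	"slices"
	"strings"
)

func demo() {
	// strings.SplitSeq iterates without building a []string first.
	for part := range strings.SplitSeq("ro,noatime,nosuid", ",") {
		fmt.Println(part)
	}

	// slices.Sort replaces sort.Slice for ordered element types.
	cpus := []uint64{3, 1, 2}
	slices.Sort(cpus)
	fmt.Println(cpus) // [1 2 3]
}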
// You may obtain a copy of the License at @@ -16,6 +16,7 @@ package procfs import ( "bufio" "bytes" + "errors" "fmt" "io" "strconv" @@ -92,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, &cpuStat.Guest, &cpuStat.GuestNice) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 65fec834bf4..ee17bf4888c 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index 80e0e947be7..0cfbb541844 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 51c49d89e81..2a8d7639097 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index e54d94b0903..806e171147d 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
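The proc.go and stat.go hunks above replace direct comparisons (errors.Unwrap(err) == ErrMountPoint, err != io.EOF) with errors.Is, which walks the entire wrap chain rather than unwrapping exactly one level. A small sketch of the difference, using a hypothetical sentinel and wrapping:

base := errors.New("mount point not found")
wrapped := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", base))

fmt.Println(errors.Unwrap(wrapped) == base) // false: peels one layer only
fmt.Println(errors.Is(wrapped, base))       // true: matches anywhere in the chain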
// You may obtain a copy of the License at @@ -88,11 +88,9 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { zoneinfo := []Zoneinfo{} - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { + for block := range bytes.SplitSeq(zoneinfoData, []byte("\nNode")) { var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(block), "\n") { if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { zoneinfoElement.Node = nodeZone[1] diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go index e854d7e84e8..2950fdb42ee 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go index 29e629d6674..5bb3b16c704 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error { // strings or integers. type protoUint64 uint64 -// Int64 returns the protoUint64 as a uint64. +// Uint64 returns the protoUint64 as a uint64. func (i *protoUint64) Uint64() uint64 { return uint64(*i) } // UnmarshalJSON decodes both strings and integers. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go index a13a6b733da..67f80b6aa07 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math" "time" ) @@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) { }{ Alias: Alias(s), ParentSpanID: parentSpanId, - StartTime: uint64(startT), - EndTime: uint64(endT), + StartTime: uint64(startT), // nolint:gosec // >0 checked above. + EndTime: uint64(endT), // nolint:gosec // >0 checked above. }) } @@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error { case "startTimeUnixNano", "start_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.StartTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.StartTime = time.Unix(0, v) case "endTimeUnixNano", "end_time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - s.EndTime = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + s.EndTime = time.Unix(0, v) case "attributes": err = decoder.Decode(&s.Attrs) case "droppedAttributesCount", "dropped_attributes_count": @@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. 
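The span.go hunks above repeatedly clamp an untrusted uint64 nanosecond timestamp before converting to int64, which is what justifies the //nolint:gosec annotations. The idiom in isolation (the helper name is hypothetical):

import (
	"math"
	"time"
)

// unixNanos converts a protobuf-style uint64 timestamp to a time.Time,
// saturating at math.MaxInt64 so the int64 conversion cannot overflow.
func unixNanos(ns uint64) time.Time {
	return time.Unix(0, int64(min(ns, math.MaxInt64)))
}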
SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -263,26 +273,30 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. + // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied +// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. 
@@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) { Time uint64 `json:"timeUnixNano,omitempty"` }{ Alias: Alias(e), - Time: uint64(t), + Time: uint64(t), //nolint:gosec // >0 checked above }) } @@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { case "timeUnixNano", "time_unix_nano": var val protoUint64 err = decoder.Decode(&val) - se.Time = time.Unix(0, int64(val.Uint64())) + v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked. + se.Time = time.Unix(0, v) case "name": err = decoder.Decode(&se.Name) case "attributes": @@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go index 1217776ead1..a2802764f81 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go index 69a348f0f06..44197b80849 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. 
// Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go index 0dd01b063a3..022768bb501 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -1,8 +1,6 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -//go:generate stringer -type=ValueKind -trimprefix=ValueKind - package telemetry import ( @@ -23,7 +21,7 @@ import ( // A zero value is valid and represents an empty value. type Value struct { // Ensure forward compatibility by explicitly making this not comparable. - noCmp [0]func() //nolint: unused // This is indeed used. + noCmp [0]func() //nolint:unused // This is indeed used. // num holds the value for Int64, Float64, and Bool. It holds the length // for String, Bytes, Slice, Map. @@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: ValueKindInt64} + return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv. } // Float64Value returns a [Value] for a float64. @@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 { // this will return garbage. func (v Value) asInt64() int64 { // Assumes v.num was a valid int64 (overflow not checked). - return int64(v.num) // nolint: gosec + return int64(v.num) //nolint:gosec // Bounded. } // AsBool returns the value held by v as a bool. @@ -309,13 +307,13 @@ func (v Value) String() string { return v.asString() case ValueKindInt64: // Assumes v.num was a valid int64 (overflow not checked). - return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded. case ValueKindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: @@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) { case ValueKindInt64: return json.Marshal(struct { Value string `json:"intValue"` - }{strconv.FormatInt(int64(v.num), 10)}) + }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv. case ValueKindFloat64: return json.Marshal(struct { Value float64 `json:"doubleValue"` diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go index 6ebea12a9e9..815d271ffb2 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/span.go +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -6,6 +6,7 @@ package sdk import ( "encoding/json" "fmt" + "math" "reflect" "runtime" "strings" @@ -16,7 +17,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" @@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { limit := maxSpan.Attrs if limit == 0 { // No attributes allowed. - s.span.DroppedAttrs += uint32(len(attrs)) + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked. 
+ min(n, math.MaxUint32), + ) + } return } @@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) { // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // number of dropped attributes is also returned. func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) if limit == 0 { - return nil, uint32(len(attrs)) + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked. + } + return nil, out } if limit < 0 { @@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u return convAttrs(attrs), 0 } - limit = min(len(attrs), limit) - return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked. } func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go index cbcfabde3b1..e09acf022fa 100644 --- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -5,6 +5,7 @@ package sdk import ( "context" + "math" "time" "go.opentelemetry.io/otel/trace" @@ -21,15 +22,20 @@ type tracer struct { var _ trace.Tracer = tracer{} -func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - var psc trace.SpanContext +func (t tracer) Start( + ctx context.Context, + name string, + opts ...trace.SpanStartOption, +) (context.Context, trace.Span) { + var psc, sc trace.SpanContext sampled := true span := new(span) // Ask eBPF for sampling decision and span context info. - t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = trace.ContextWithSpan(ctx, span) @@ -58,7 +64,13 @@ func (t *tracer) start( // start is used for testing. var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} -func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { +var intToUint32Bound = min(math.MaxInt, math.MaxUint32) + +func (t tracer) traces( + name string, + cfg trace.SpanConfig, + sc, psc trace.SpanContext, +) (*telemetry.Traces, *telemetry.Span) { span := &telemetry.Span{ TraceID: telemetry.TraceID(sc.TraceID()), SpanID: telemetry.SpanID(sc.SpanID()), @@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont links := cfg.Links() if limit := maxSpan.Links; limit == 0 { - span.DroppedLinks = uint32(len(links)) + n := len(links) + if n > 0 { + bounded := max(min(n, intToUint32Bound), 0) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. + } } else { if limit > 0 { n := max(len(links)-limit, 0) - span.DroppedLinks = uint32(n) + bounded := min(n, intToUint32Bound) + span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked. 
links = links[n:] } span.Links = convLinks(links) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 521daa25dbf..e980ab62b85 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -14,9 +14,15 @@ import ( // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. // Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. +// +// Deprecated: [DefaultClient] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. +// +// Deprecated: [Get] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody) if err != nil { @@ -26,6 +32,9 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) } // Head is a convenient replacement for http.Head that adds a span around the request. +// +// Deprecated: [Head] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody) if err != nil { @@ -35,6 +44,9 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error } // Post is a convenient replacement for http.Post that adds a span around the request. +// +// Deprecated: [Post] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { @@ -45,6 +57,9 @@ func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (r } // PostForm is a convenient replacement for http.PostForm that adds a span around the request. +// +// Deprecated: [PostForm] will be removed in a future release. 
+// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 38fb79c0328..c3be78616b2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -26,7 +26,6 @@ type config struct { Meter metric.Meter Propagators propagation.TextMapPropagator SpanStartOptions []trace.SpanStartOption - PublicEndpoint bool PublicEndpointFn func(*http.Request) bool ReadEvent bool WriteEvent bool @@ -96,17 +95,19 @@ func WithMeterProvider(provider metric.MeterProvider) Option { // WithPublicEndpoint configures the Handler to link the span with an incoming // span context. If this option is not provided, then the association is a child // association instead of a link. +// +// Deprecated: Use [WithPublicEndpointFn] instead. +// To migrate, replace WithPublicEndpoint() with: +// +// WithPublicEndpointFn(func(*http.Request) bool { return true }) func WithPublicEndpoint() Option { - return optionFunc(func(c *config) { - c.PublicEndpoint = true - }) + return WithPublicEndpointFn(func(*http.Request) bool { return true }) } // WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. -// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. func WithPublicEndpointFn(fn func(*http.Request) bool) Option { return optionFunc(func(c *config) { c.PublicEndpointFn = fn @@ -143,11 +144,13 @@ func WithFilter(f Filter) Option { }) } -type event int +// Event represents message event types for [WithMessageEvents]. +type Event int // Different types of events that can be recorded, see WithMessageEvents. const ( - ReadEvents event = iota + unspecifiedEvents Event = iota + ReadEvents WriteEvents ) @@ -160,7 +163,7 @@ const ( // using the ReadBytesKey // - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write // using the WriteBytesKey -func WithMessageEvents(events ...event) Option { +func WithMessageEvents(events ...Event) Option { return optionFunc(func(c *config) { for _, e := range events { switch e { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 56b24b982ae..1c9aa3ff425 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -2,6 +2,5 @@ // SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended -// to be used to add tracing by wrapping existing handlers (with Handler) and -// routes WithRouteTag. +// to be used to add tracing by wrapping existing handlers. 
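The deprecations above all point at the same two migrations: replace the package-level helpers (`DefaultClient`, `Get`, `Head`, `Post`, `PostForm`) with your own `http.Client` wrapping `otelhttp.NewTransport`, and express `WithPublicEndpoint()` through `WithPublicEndpointFn`. A self-contained sketch of both, under the assumption of an otherwise default setup (the `httptest` server is only there to make the example runnable):

```go
package main

import (
	"context"
	"io"
	"log"
	"net/http"
	"net/http/httptest"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Server side: the replacement for the deprecated WithPublicEndpoint()
	// is the functional option, exactly as the deprecation notice suggests.
	srv := httptest.NewServer(otelhttp.NewHandler(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			_, _ = w.Write([]byte("ok"))
		}),
		"server",
		otelhttp.WithPublicEndpointFn(func(*http.Request) bool { return true }),
	))
	defer srv.Close()

	// Client side: the replacement for otelhttp.DefaultClient / otelhttp.Get
	// is an http.Client whose transport is wrapped by otelhttp.NewTransport.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, srv.URL, http.NoBody)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	log.Printf("%s", body)
}
```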
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index fef83b42fe1..c1bbf3a3c2d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -29,7 +29,6 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - publicEndpoint bool publicEndpointFn func(*http.Request) bool metricAttributesFn func(*http.Request) []attribute.KeyValue @@ -77,7 +76,6 @@ func (h *middleware) configure(c *config) { h.writeEvent = c.WriteEvent h.filters = c.Filters h.spanNameFormatter = c.SpanNameFormatter - h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) @@ -102,7 +100,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } opts = append(opts, h.spanStartOptions...) - if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + if h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx)) { opts = append(opts, trace.WithNewRoot()) // Linking incoming span context if any for public endpoint. if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { @@ -224,6 +222,9 @@ func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.Ke // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. +// +// Deprecated: spans are automatically annotated with the route attribute. +// To annotate metrics, use the [WithMetricAttributesFn] option. func WithRouteTag(route string, h http.Handler) http.Handler { attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go new file mode 100644 index 00000000000..45d3d934f52 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go @@ -0,0 +1,305 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/client.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. 
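The `WithRouteTag` deprecation in the handler.go hunk above leans on two changes: spans now pick up `http.route` automatically (the server semconv code reads `req.Pattern`), so only metrics still need per-route annotation, and that is what `WithMetricAttributesFn` provides. A hedged sketch of the suggested replacement, wrapping a single route with the route string hard-coded for illustration:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	users := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})

	mux := http.NewServeMux()
	// ServeMux sets r.Pattern before invoking the handler, so the span
	// started inside otelhttp.NewHandler sees the matched route; the
	// callback below adds the same route to the request metrics.
	mux.Handle("/users/{id}", otelhttp.NewHandler(users, "users",
		otelhttp.WithMetricAttributesFn(func(*http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{attribute.String("http.route", "/users/{id}")}
		}),
	))

	_ = http.ListenAndServe(":8080", mux)
}
```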
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "reflect" + "slices" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" +) + +type HTTPClient struct{ + requestBodySize httpconv.ClientRequestBodySize + requestDuration httpconv.ClientRequestDuration +} + +func NewHTTPClient(meter metric.Meter) HTTPClient { + client := HTTPClient{} + + var err error + client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) + handleErr(err) + + client.requestDuration, err = httpconv.NewClientRequestDuration( + meter, + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), + ) + handleErr(err) + + return client +} + +func (n HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconv.URLFull(u)) + + attrs = append(attrs, semconv.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconv.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
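One detail of `RequestTraceAttrs` above is worth isolating: `url.full` is rendered with `req.URL.User` temporarily cleared, so credentials embedded in a URL never reach the trace, and the caller's URL is restored afterwards. The trick on its own (a sketch; `fullURL` is a made-up helper name, not the vendored code):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// fullURL renders a request URL with any userinfo removed, then restores it,
// mirroring how the client semconv code builds the url.full attribute.
func fullURL(req *http.Request) string {
	if req.URL == nil {
		return ""
	}
	userinfo := req.URL.User
	req.URL.User = nil
	u := req.URL.String()
	req.URL.User = userinfo // restore the caller's URL untouched
	return u
}

func main() {
	u, _ := url.Parse("https://alice:s3cret@example.com/q?x=1")
	req := &http.Request{URL: u}
	fmt.Println(fullURL(req)) // https://example.com/q?x=1
}
```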
+func (n HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconv.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconv.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n HTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconv.ErrorTypeOther + } + + return semconv.ErrorTypeKey.String(value) +} + +func (n HTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconv.ServerAddress(requestHost), + n.scheme(req), + ) + + if port > 0 { + attributes = append(attributes, semconv.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (n HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := n.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + + return opts +} + +func (n HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts 
map[string]MetricOpts) { + n.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + n.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) +} + +// TraceAttributes returns attributes for httptrace. +func (n HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconv.ServerAddress(host), + } +} + +func (n HTTPClient) scheme(req *http.Request) attribute.KeyValue { + if req.URL != nil && req.URL.Scheme != "" { + return semconv.URLScheme(req.URL.Scheme) + } + if req.TLS != nil { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go deleted file mode 100644 index 821b80ec41c..00000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/env.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "context" - "fmt" - "net/http" - "strings" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" -) - -// OTelSemConvStabilityOptIn is an environment variable. -// That can be set to "http/dup" to keep getting the old HTTP semantic conventions. -const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" - -type ResponseTelemetry struct { - StatusCode int - ReadBytes int64 - ReadError error - WriteBytes int64 - WriteError error -} - -type HTTPServer struct { - requestBodySizeHistogram httpconv.ServerRequestBodySize - responseBodySizeHistogram httpconv.ServerResponseBodySize - requestDurationHistogram httpconv.ServerRequestDuration -} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - return CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) -} - -func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { - return []attribute.KeyValue{ - CurrentHTTPServer{}.NetworkTransportAttr(network), - } -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. 
-// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - return CurrentHTTPServer{}.ResponseTraceAttrs(resp) -} - -// Route returns the attribute for the route. -func (s HTTPServer) Route(route string) attribute.KeyValue { - return CurrentHTTPServer{}.Route(route) -} - -// Status returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (s HTTPServer) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} - -type ServerMetricData struct { - ServerName string - ResponseSize int64 - - MetricData - MetricAttributes -} - -type MetricAttributes struct { - Req *http.Request - StatusCode int - AdditionalAttributes []attribute.KeyValue -} - -type MetricData struct { - RequestSize int64 - - // The request duration, in milliseconds - ElapsedTime float64 -} - -var ( - metricAddOptionPool = &sync.Pool{ - New: func() any { - return &[]metric.AddOption{} - }, - } - - metricRecordOptionPool = &sync.Pool{ - New: func() any { - return &[]metric.RecordOption{} - }, - } -) - -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) - *recordOpts = append(*recordOpts, o) - s.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) - s.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) - s.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) - *recordOpts = (*recordOpts)[:0] - metricRecordOptionPool.Put(recordOpts) -} - -// hasOptIn returns true if the comma-separated version string contains the -// exact optIn value. -func hasOptIn(version, optIn string) bool { - for _, v := range strings.Split(version, ",") { - if strings.TrimSpace(v) == optIn { - return true - } - } - return false -} - -func NewHTTPServer(meter metric.Meter) HTTPServer { - server := HTTPServer{} - - var err error - server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) - handleErr(err) - - server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter) - handleErr(err) - - server.requestDurationHistogram, err = httpconv.NewServerRequestDuration( - meter, - metric.WithExplicitBucketBoundaries( - 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, - 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, - ), - ) - handleErr(err) - return server -} - -type HTTPClient struct { - requestBodySize httpconv.ClientRequestBodySize - requestDuration httpconv.ClientRequestDuration -} - -func NewHTTPClient(meter metric.Meter) HTTPClient { - client := HTTPClient{} - - var err error - client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) - handleErr(err) - - client.requestDuration, err = httpconv.NewClientRequestDuration( - meter, - metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), - ) - handleErr(err) - - return client -} - -// RequestTraceAttrs returns attributes for an HTTP request made by a client. 
-func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - return CurrentHTTPClient{}.RequestTraceAttrs(req) -} - -// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. -func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return CurrentHTTPClient{}.ResponseTraceAttrs(resp) -} - -func (c HTTPClient) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -func (c HTTPClient) ErrorType(err error) attribute.KeyValue { - return CurrentHTTPClient{}.ErrorType(err) -} - -type MetricOpts struct { - measurement metric.MeasurementOption - addOptions metric.AddOption -} - -func (o MetricOpts) MeasurementOption() metric.MeasurementOption { - return o.measurement -} - -func (o MetricOpts) AddOptions() metric.AddOption { - return o.addOptions -} - -func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { - opts := map[string]MetricOpts{} - - attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["new"] = MetricOpts{ - measurement: set, - addOptions: set, - } - - return opts -} - -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { - s.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) - s.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) -} - -func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return CurrentHTTPClient{}.TraceAttributes(host) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go index 1bb207b8092..a8a0d58df35 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -6,10 +6,10 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ // Generate semconv package: //go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl 
--body=../../../../../../internal/shared/semconv/server.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/server_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go deleted file mode 100644 index 28c51a3b389..00000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ /dev/null @@ -1,517 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv provides OpenTelemetry semantic convention types and -// functionality. -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "fmt" - "net/http" - "reflect" - "slices" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" -) - -type RequestTraceAttrsOpts struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -type CurrentHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. 
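Both the `httpconv.go` being removed below and its `client.go`/`server.go` replacements size their attribute slices the same way: a first pass counts exactly which attributes will be set, then a single allocation is filled by appends, so the slice never grows. The idiom, distilled to two stand-in fields (not the vendored code):

```go
package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/attribute"
)

// responseAttrs shows the count-then-append idiom: the first pass sizes the
// slice exactly, the second pass fills it, so no reallocation ever happens.
func responseAttrs(resp *http.Response) []attribute.KeyValue {
	var count int
	if resp.StatusCode > 0 {
		count++
	}
	if resp.ContentLength > 0 {
		count++
	}

	attrs := make([]attribute.KeyValue, 0, count)
	if resp.StatusCode > 0 {
		attrs = append(attrs, attribute.Int("http.response.status_code", resp.StatusCode))
	}
	if resp.ContentLength > 0 {
		attrs = append(attrs, attribute.Int64("http.response.body.size", resp.ContentLength))
	}
	return attrs
}

func main() {
	fmt.Println(responseAttrs(&http.Response{StatusCode: 200, ContentLength: 42}))
}
```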
-func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - count := 3 // ServerAddress, Method, Scheme - - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - count++ - } - - method, methodOriginal := n.method(req.Method) - if methodOriginal != (attribute.KeyValue{}) { - count++ - } - - scheme := n.scheme(req.TLS != nil) - - peer, peerPort := SplitHostPort(req.RemoteAddr) - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - count++ - if peerPort > 0 { - count++ - } - } - - useragent := req.UserAgent() - if useragent != "" { - count++ - } - - // For client IP, use, in order: - // 1. The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - count++ - } - - if req.URL != nil && req.URL.Path != "" { - count++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - count++ - } - if protoVersion != "" { - count++ - } - - route := httpRoute(req.Pattern) - if route != "" { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - attrs = append(attrs, - semconvNew.ServerAddress(host), - method, - scheme, - ) - - if hostPort > 0 { - attrs = append(attrs, semconvNew.ServerPort(hostPort)) - } - if methodOriginal != (attribute.KeyValue{}) { - attrs = append(attrs, methodOriginal) - } - - if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. 
- attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) - if peerPort > 0 { - attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, semconvNew.ClientAddress(clientIP)) - } - - if req.URL != nil && req.URL.Path != "" { - attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if route != "" { - attrs = append(attrs, n.Route(route)) - } - - return attrs -} - -func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return semconvNew.NetworkTransportTCP - case "udp", "udp4", "udp6": - return semconvNew.NetworkTransportUDP - case "unix", "unixgram", "unixpacket": - return semconvNew.NetworkTransportUnix - default: - return semconvNew.NetworkTransportPipe - } -} - -func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter - if https { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP -// response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will -// be omitted. -func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - var count int - - if resp.ReadBytes > 0 { - count++ - } - if resp.WriteBytes > 0 { - count++ - } - if resp.StatusCode > 0 { - count++ - } - - attributes := make([]attribute.KeyValue, 0, count) - - if resp.ReadBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), - ) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), - ) - } - if resp.StatusCode > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseStatusCode(resp.StatusCode), - ) - } - - return attributes -} - -// Route returns the attribute for the route. -func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { - return semconvNew.HTTPRoute(route) -} - -func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - num++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - n.scheme(req.TLS != nil), - semconvNew.ServerAddress(host)) - - if hostPort > 0 { - attributes = append(attributes, semconvNew.ServerPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -type CurrentHTTPClient struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. -func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - /* - below attributes are returned: - - http.request.method - - http.request.method.original - - url.full - - server.address - - server.port - - network.protocol.name - - network.protocol.version - */ - numOfAttributes := 3 // URL, server address, proto, and method. - - var urlHost string - if req.URL != nil { - urlHost = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if eligiblePort > 0 { - numOfAttributes++ - } - useragent := req.UserAgent() - if useragent != "" { - numOfAttributes++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - numOfAttributes++ - } - if protoVersion != "" { - numOfAttributes++ - } - - method, originalMethod := n.method(req.Method) - if originalMethod != (attribute.KeyValue{}) { - numOfAttributes++ - } - - attrs := make([]attribute.KeyValue, 0, numOfAttributes) - - attrs = append(attrs, method) - if originalMethod != (attribute.KeyValue{}) { - attrs = append(attrs, originalMethod) - } - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, semconvNew.URLFull(u)) - - attrs = append(attrs, semconvNew.ServerAddress(requestHost)) - if eligiblePort > 0 { - attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - return attrs -} - -// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
-func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - /* - below attributes are returned: - - http.response.status_code - - error.type - */ - var count int - if resp.StatusCode > 0 { - count++ - } - - if isErrorStatusCode(resp.StatusCode) { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - if resp.StatusCode > 0 { - attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) - } - - if isErrorStatusCode(resp.StatusCode) { - errorType := strconv.Itoa(resp.StatusCode) - attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) - } - return attrs -} - -func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - - if value == "" { - return semconvNew.ErrorTypeOther - } - - return semconvNew.ErrorTypeKey.String(value) -} - -func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 2 - var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - num++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - semconvNew.ServerAddress(requestHost), - n.scheme(req), - ) - - if port > 0 { - attributes = append(attributes, semconvNew.ServerPort(port)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -// TraceAttributes returns attributes for httptrace. 
-func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return []attribute.KeyValue{ - semconvNew.ServerAddress(host), - } -} - -func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue { - if req.URL != nil && req.URL.Scheme != "" { - return semconvNew.URLScheme(req.URL.Scheme) - } - if req.TLS != nil { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -func isErrorStatusCode(code int) bool { - return code >= 400 || code < 100 -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go new file mode 100644 index 00000000000..5ae6a07386b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go @@ -0,0 +1,403 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/server.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "slices" + "strings" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" +) + +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct{ + requestBodySizeHistogram httpconv.ServerRequestBodySize + responseBodySizeHistogram httpconv.ServerResponseBodySize + requestDurationHistogram httpconv.ServerRequestDuration +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + server := HTTPServer{} + + var err error + server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) + handleErr(err) + + server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter) + handleErr(err) + + server.requestDurationHistogram, err = httpconv.NewServerRequestDuration( + meter, + metric.WithExplicitBucketBoundaries( + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, + 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + ), + ) + handleErr(err) + return server +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (n HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. 
More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconv.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconv.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
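The client-IP precedence documented in this function (explicit option, then `X-Forwarded-For`, then the peer address) is easy to get wrong in application code, so here it is in isolation. This is a simplified stand-in, not the vendored `serverClientIP` helper:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// clientIP mirrors the precedence above: an explicit override wins, then the
// first X-Forwarded-For hop, then the connection's peer address.
func clientIP(override string, req *http.Request) string {
	if override != "" {
		return override
	}
	if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
		first, _, _ := strings.Cut(xff, ",")
		return strings.TrimSpace(first)
	}
	host, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		return req.RemoteAddr
	}
	return host
}

func main() {
	req := &http.Request{
		Header:     http.Header{"X-Forwarded-For": []string{"203.0.113.7, 10.0.0.1"}},
		RemoteAddr: "192.0.2.1:443",
	}
	fmt.Println(clientIP("", req)) // 203.0.113.7
}
```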
+ attrs = append(attrs, semconv.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconv.NetworkPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, semconv.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconv.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconv.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + if route != "" { + attrs = append(attrs, n.Route(route)) + } + + return attrs +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + attr := semconv.NetworkTransportPipe + switch network { + case "tcp", "tcp4", "tcp6": + attr = semconv.NetworkTransportTCP + case "udp", "udp4", "udp6": + attr = semconv.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + attr = semconv.NetworkTransportUnix + } + + return []attribute.KeyValue{attr} +} + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + Route string + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + + // The request duration, in milliseconds + ElapsedTime float64 +} + +var ( + metricAddOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.RecordOption{} + }, + } +) + +func (n HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + attributes := n.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.Route, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + n.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) + n.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) + n.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) +} + +func (n HTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter + if https { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. 
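`RecordMetrics` above keeps per-request recording allocation-free by recycling `[]metric.RecordOption` slices through a `sync.Pool`: get a slice, append the shared attribute-set option, record, truncate to zero length, put it back. The shape of that pattern reduced to its essentials (the histogram here is a stand-in created through the global meter, not the vendored `httpconv` instrument):

```go
package main

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

var recordOptionPool = &sync.Pool{
	New: func() any { return &[]metric.RecordOption{} },
}

func record(ctx context.Context, hist metric.Float64Histogram, elapsedMS float64, attrs []attribute.KeyValue) {
	o := metric.WithAttributeSet(attribute.NewSet(attrs...))
	opts := recordOptionPool.Get().(*[]metric.RecordOption)
	*opts = append(*opts, o)
	hist.Record(ctx, elapsedMS/1000.0, *opts...) // duration stored in seconds
	*opts = (*opts)[:0]                          // reset length, keep capacity
	recordOptionPool.Put(opts)
}

func main() {
	meter := otel.Meter("example")
	hist, err := meter.Float64Histogram("http.server.request.duration", metric.WithUnit("s"))
	if err != nil {
		panic(err)
	}
	record(context.Background(), hist, 12.5,
		[]attribute.KeyValue{attribute.Int("http.response.status_code", 200)})
}
```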
+func (n HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconv.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconv.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconv.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n HTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +func (n HTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, route string, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + if route != "" { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconv.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + + if route != "" { + attributes = append(attributes, semconv.HTTPRoute(route)) + } + return attributes +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index dfb53cf1f3a..6e096da5e21 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.63.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go index fabf952c46d..aa33a09b879 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/doc.go @@ -2,33 +2,4 @@ // SPDX-License-Identifier: Apache-2.0 // Package runtime implements the conventional runtime metrics specified by OpenTelemetry. -// -// The metric events produced are: -// -// go.memory.used By Memory used by the Go runtime. 
-// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists. -// go.memory.allocated By Memory allocated to the heap by the application. -// go.memory.allocations {allocation} Count of allocations to the heap by the application. -// go.memory.gc.goal By Heap size target for the end of the GC cycle. -// go.goroutine.count {goroutine} Count of live goroutines. -// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously. -// go.config.gogc % Heap size target percentage configured by the user, otherwise 100. -// -// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to -// true, the following deprecated metrics are produced: -// -// runtime.go.cgo.calls - Number of cgo calls made by the current process -// runtime.go.gc.count - Number of completed garbage collection cycles -// runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses -// runtime.go.gc.pause_total_ns (ns) Cumulative nanoseconds in GC stop-the-world pauses since the program started -// runtime.go.goroutines - Number of goroutines that currently exist -// runtime.go.lookups - Number of pointer lookups performed by the runtime -// runtime.go.mem.heap_alloc (bytes) Bytes of allocated heap objects -// runtime.go.mem.heap_idle (bytes) Bytes in idle (unused) spans -// runtime.go.mem.heap_inuse (bytes) Bytes in in-use spans -// runtime.go.mem.heap_objects - Number of allocated heap objects -// runtime.go.mem.heap_released (bytes) Bytes of idle spans whose physical memory has been returned to the OS -// runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS -// runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees -// runtime.uptime (ms) Milliseconds since application was initialized package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/producer.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/producer.go index 82426b3f2ae..3c30b64aadf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/producer.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/producer.go @@ -34,6 +34,10 @@ type Producer struct { var _ metric.Producer = (*Producer)(nil) // NewProducer creates a Producer which provides precomputed histogram metrics from the go runtime. +// +// Metrics emitted by NewProducer include: +// +// go.schedule.duration s The time goroutines have spent in the scheduler in a runnable state before actually running. func NewProducer(opts ...ProducerOption) *Producer { c := newProducerConfig(opts...) return &Producer{ diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go index 3dc4ad8c5aa..93f20de5c97 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/runtime.go @@ -37,6 +37,35 @@ const ( // Start initializes reporting of runtime metrics using the supplied config. // For goroutine scheduling metrics, additionally see [NewProducer]. +// +// Metrics emitted by Start includes: +// +// go.memory.used By Memory used by the Go runtime. +// go.memory.limit By Go runtime memory limit configured by the user, if a limit exists. +// go.memory.allocated By Memory allocated to the heap by the application. 
+// go.memory.allocations {allocation} Count of allocations to the heap by the application. +// go.memory.gc.goal By Heap size target for the end of the GC cycle. +// go.goroutine.count {goroutine} Count of live goroutines. +// go.processor.limit {thread} The number of OS threads that can execute user-level Go code simultaneously. +// go.config.gogc % Heap size target percentage configured by the user, otherwise 100. +// +// When the OTEL_GO_X_DEPRECATED_RUNTIME_METRICS environment variable is set to +// true, the following deprecated metrics are produced: +// +// runtime.go.cgo.calls - Number of cgo calls made by the current process +// runtime.go.gc.count - Number of completed garbage collection cycles +// runtime.go.gc.pause_ns (ns) Amount of nanoseconds in GC stop-the-world pauses +// runtime.go.gc.pause_total_ns (ns) Cumulative nanoseconds in GC stop-the-world pauses since the program started +// runtime.go.goroutines - Number of goroutines that currently exist +// runtime.go.lookups - Number of pointer lookups performed by the runtime +// runtime.go.mem.heap_alloc (bytes) Bytes of allocated heap objects +// runtime.go.mem.heap_idle (bytes) Bytes in idle (unused) spans +// runtime.go.mem.heap_inuse (bytes) Bytes in in-use spans +// runtime.go.mem.heap_objects - Number of allocated heap objects +// runtime.go.mem.heap_released (bytes) Bytes of idle spans whose physical memory has been returned to the OS +// runtime.go.mem.heap_sys (bytes) Bytes of heap memory obtained from the OS +// runtime.go.mem.live_objects - Number of live objects is the number of cumulative Mallocs - Frees +// runtime.uptime (ms) Milliseconds since application was initialized func Start(opts ...Option) error { c := newConfig(opts...) meter := c.MeterProvider.Meter( diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go index cce12966d25..b51e5249bcc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/runtime/version.go @@ -5,6 +5,6 @@ package runtime // import "go.opentelemetry.io/contrib/instrumentation/runtime" // Version is the current release version of the runtime instrumentation. 
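With the metric inventory now documented on `Start` (and `go.schedule.duration` on `NewProducer`), the two entry points are wired together like this. A sketch assuming a manual reader for demonstration; in production the producer would typically be attached to a periodic reader feeding an exporter:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/contrib/instrumentation/runtime"
	"go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	// NewProducer feeds the precomputed go.schedule.duration histogram into
	// the reader; Start registers go.memory.*, go.goroutine.count, etc. on
	// the provider's meter.
	reader := metric.NewManualReader(metric.WithProducer(runtime.NewProducer()))
	provider := metric.NewMeterProvider(metric.WithReader(reader))
	defer func() { _ = provider.Shutdown(context.Background()) }()

	if err := runtime.Start(runtime.WithMeterProvider(provider)); err != nil {
		log.Fatal(err)
	}

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(context.Background(), &rm); err != nil {
		log.Fatal(err)
	}
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			log.Println(m.Name)
		}
	}
}
```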
func Version() string { - return "0.63.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 2b53a25e1e1..a6d0cbcc9e8 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -8,3 +8,4 @@ nam valu thirdparty addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index b01762ffcc7..1b1b2aff9a4 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -197,6 +197,9 @@ linters: - float-compare - go-require - require-error + usetesting: + context-background: true + context-todo: true exclusions: generated: lax presets: diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 5328505888d..994b677df7f 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,4 +1,5 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects @@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual -http://4.3.2.1:78/user/123 \ No newline at end of file +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index f3abcfdc2e3..ecbe0582c48 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#7459)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
+- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
+- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
+- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
+- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
+  All `Processor` implementations now include an `Enabled` method. (#7639)
+- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
+  The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648)
+
+### Changed
+
+- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
+  Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/metric` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
+- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
+- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
+- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
+  ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
+  Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
+  To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
+- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
+- The default `TranslationStrategy` in `go.opentelemetry.io/otel/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
+- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`.
(#7427)
+- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
+- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
+  If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
+
+### Fixed
+
+- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
+  Attributes with duplicate keys will use the last value passed. (#7300)
+- The equality of `attribute.Set` when using the `Equals` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
+- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
+
+### Removed
+
+- Drop support for [Go 1.23]. (#7274)
+- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
+  The `Enabled` method has been added to the `Processor` interface instead.
+  All `Processor` implementations must now implement the `Enabled` method.
+  Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
+
 ## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
 
 This release is the last to support [Go 1.23].
@@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 0b3ae855c19..ff5e1f76ecd 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. +Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. Enter the newly created directory and add your fork as a new remote: @@ -109,7 +108,7 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * At least one of the qualified approvals need to be from an + * At least one of the qualified approvals needs to be from an [Approver]/[Maintainer] affiliated with a different company than the author of the PR. * PRs introducing changes that have already been discussed and consensus @@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -217,7 +216,7 @@ about dependency compatibility. This project does not partition dependencies based on the environment (i.e. `development`, `staging`, `production`). -Only the dependencies explicitly included in the released modules have be +Only the dependencies explicitly included in the released modules have been tested and verified to work with the released code. No other guarantee is made about the compatibility of other dependencies. @@ -635,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. 
A sub-module should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
 
 There are two known exceptions to this rule:
 
@@ -657,7 +656,7 @@ this.
 
 ### Ignoring context cancellation
 
-OpenTelemetry API implementations need to ignore the cancellation of the context that are
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
 passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
 Recording methods should not return an error describing the cancellation state of the context
 when they complete, nor should they abort any work.
@@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
 should be honored. This means all work done on behalf of the user provided context
 should be canceled.
 
+### Observability
+
+OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
+This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
+
+This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
+
+#### Environment Variable Activation
+
+Observability features are currently experimental.
+They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
+This follows the established experimental feature pattern used throughout the SDK.
+
+Components should check for this environment variable using a consistent pattern:
+
+```go
+import "go.opentelemetry.io/otel/*/internal/x"
+
+if x.Observability.Enabled() {
+	// Initialize observability metrics
+}
+```
+
+**References**:
+
+- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
+- [sdk](./sdk/internal/x/x.go)
+
+#### Encapsulation
+
+Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
+It should not be mixed into the instrumented component.
+
+Prefer this:
+
+```go
+type SDKComponent struct {
+	inst *instrumentation
+}
+
+type instrumentation struct {
+	inflight otelconv.SDKComponentInflight
+	exported otelconv.SDKComponentExported
+}
+```
+
+To this:
+
+```go
+// ❌ Avoid this pattern.
+type SDKComponent struct {
+	/* other SDKComponent fields... */
+
+	inflight otelconv.SDKComponentInflight
+	exported otelconv.SDKComponentExported
+}
+```
+
+The instrumentation code should not bloat the code being instrumented.
+In practice, this means its own file, or its own package if it is complex or reused.
+
+#### Initialization
+
+Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
+Avoid relying on global or implicit [side effects][side-effect] for initialization.
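+
+For instance, this is the kind of implicit, import-time setup to avoid (a hypothetical sketch; `globalInst` is an illustrative name):
+
+```go
+// ❌ Avoid this pattern.
+var globalInst *instrumentation
+
+func init() {
+	// Import-time side effect: no clear owner, no scope, and any error
+	// from newInstrumentation is silently dropped.
+	globalInst, _ = newInstrumentation()
+}
+```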
+ +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + // Export spans... +} + +func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { + if i == nil || i.inflight == nil { + return + } + i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) +} +``` + +When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. + +##### Attribute and Option Allocation Management + +Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. + +```go +var ( + attrPool = sync.Pool{ + New: func() any { + // Pre-allocate common capacity + knownCap := 8 // Adjust based on expected usage + s := make([]attribute.KeyValue, 0, knownCap) + // Return a pointer to avoid extra allocation on Put(). + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + // Return a pointer to avoid extra allocation on Put(). + return &o + }, + } +) + +func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { + attrs := attrPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // Reset. + attrPool.Put(attrs) + }() + + *attrs = append(*attrs, baseAttrs...) + // Add any dynamic attributes. 
+	*attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
+
+	addOpt := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*addOpt = (*addOpt)[:0]
+		addOptPool.Put(addOpt)
+	}()
+
+	set := attribute.NewSet(*attrs...)
+	*addOpt = append(*addOpt, metric.WithAttributeSet(set))
+
+	i.counter.Add(ctx, value, *addOpt...)
+}
+```
+
+Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
+This amortizes the cost of allocation and synchronization.
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
+
+[`sync.Pool`]: https://pkg.go.dev/sync#Pool
+
+##### Cache common attribute sets for repeated measurements
+
+If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
+
+```go
+type spanLiveSetKey struct {
+	sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+	{true}: attribute.NewSet(
+		otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+			otelconv.SpanSamplingResultRecordAndSample,
+		),
+	),
+	{false}: attribute.NewSet(
+		otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+			otelconv.SpanSamplingResultRecordOnly,
+		),
+	),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+	key := spanLiveSetKey{sampled: sampled}
+	return spanLiveSetCache[key]
+}
+```
+
+##### Benchmarking
+
+Always provide benchmarks when introducing or refactoring instrumentation.
+Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
+
+```go
+func BenchmarkExportSpans(b *testing.B) {
+	scenarios := []struct {
+		name       string
+		obsEnabled bool
+	}{
+		{"ObsDisabled", false},
+		{"ObsEnabled", true},
+	}
+
+	for _, scenario := range scenarios {
+		b.Run(scenario.name, func(b *testing.B) {
+			b.Setenv(
+				"OTEL_GO_X_OBSERVABILITY",
+				strconv.FormatBool(scenario.obsEnabled),
+			)
+
+			exporter := NewExporter()
+			spans := generateTestSpans(100)
+
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			for i := 0; i < b.N; i++ {
+				_ = exporter.ExportSpans(context.Background(), spans)
+			}
+		})
+	}
+}
+```
+
+#### Error Handling and Robustness
+
+Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible.
+
+```go
+func newInstrumentation() (*instrumentation, error) {
+	if !x.Observability.Enabled() {
+		return nil, nil
+	}
+
+	m := otel.GetMeterProvider().Meter(/* initialize meter */)
+	counter, err := otelconv.NewSDKComponentCounter(m)
+	// Use the partially initialized counter if available.
+	i := &instrumentation{counter: counter}
+	// Return any error to the caller.
+	return i, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func newInstrumentation() *instrumentation {
+	if !x.Observability.Enabled() {
+		return nil
+	}
+
+	m := otel.GetMeterProvider().Meter(/* initialize meter */)
+	counter, err := otelconv.NewSDKComponentCounter(m)
+	if err != nil {
+		// ❌ Do not dump the error to the OTel Handler. Return it to the
+		// caller.
+		otel.Handle(err)
+		// ❌ Do not return nil if we can still use the partially initialized
+		// counter.
+		return nil
+	}
+	return &instrumentation{counter: counter}
+}
+```
+
+If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
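+
+For example, a constructor that has no error return value might fall back like this (a sketch; `Component` and `NewComponent` are illustrative names):
+
+```go
+// NewComponent cannot return instrumentation setup errors to its caller,
+// so it reports them to otel.Handle and keeps whatever partially
+// initialized instrumentation remains usable.
+func NewComponent() *Component {
+	inst, err := newInstrumentation()
+	if err != nil {
+		otel.Handle(err)
+	}
+	return &Component{inst: inst}
+}
+```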
+ +#### Context Propagation + +Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // Use the provided context for observability measurements + e.inst.recordSpanExportStarted(ctx, len(spans)) + + err := e.doExport(ctx, spans) + + if err != nil { + e.inst.recordSpanExportFailed(ctx, len(spans), err) + } else { + e.inst.recordSpanExportSucceeded(ctx, len(spans)) + } + + return err +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // ❌ Do not break the context propagation. + e.inst.recordSpanExportStarted(context.Background(), len(spans)) + + err := e.doExport(ctx, spans) + + /* ... */ + + return err +} +``` + +#### Semantic Conventions Compliance + +All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). + +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go). + +##### Component Identification + +Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). + +If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. + +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. + +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. + t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. 
component ID counters) is reset between tests. + ## Approvers and Maintainers ### Maintainers @@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt ### Triagers - [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). @@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index bc0f1f92d1f..44870248c32 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -146,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 6b7ab5f2193..c6335954311 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -55,25 +55,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | | Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | | Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| macOS 13 | 1.25 | amd64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | | macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | | Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | | Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1ddcdef0396..861756fd745 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit ## Breaking changes validation -You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. +You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. 
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). @@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` 3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. To verify this, you can look directly at the commits since the ``. ``` @@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct. ... ``` -## Release +## Sign artifacts -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. +To ensure we comply with CNCF best practices, we need to sign the release artifacts. -### Sign the Release Artifact +Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. +Both archives need to be signed with your GPG key. -To ensure we comply with CNCF best practices, we need to sign the release artifact. -The tarball attached to the GitHub release needs to be signed with your GPG key. +You can use [this script] to verify the contents of the archives before signing them. -Follow [these steps] to sign the release artifact and upload it to GitHub. -You can use [this script] to verify the contents of the tarball before signing it. +To find your GPG key ID, run: -Be sure to use the correct GPG key when signing the release artifact. +```terminal +gpg --list-secret-keys --keyid-format=long +``` + +The key ID is the 16-character string after `sec rsa4096/` (or similar). + +Set environment variables and sign both artifacts: ```terminal -gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz +export VERSION="" # e.g., v1.32.0 +export KEY_ID="" + +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip ``` -You can verify the signature with: +You can verify the signatures with: ```terminal -gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz +gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz +gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip ``` -[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases [this script]: https://github.com/MrAlias/attest-sh +## Release + +Finally create a Release for the new `` on GitHub. +The release body should include all the release notes from the Changelog for this release. + +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. + ## Post-Release ### Contrib Repository @@ -160,14 +176,6 @@ This helps track what changes were included in each release. Once all related issues and PRs have been added to the milestone, close the milestone. 
-### Demo Repository - -Bump the dependencies in the following Go services: - -- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) - ### Close the `Version Release` issue Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index b8cb605c166..b27c9e84f51 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -83,7 +83,7 @@ is designed so the following goals can be achieved. in either the module path or the import path. * In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. + alerts and dashboards. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. * Experimental modules still under active development will be versioned at diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6333d34b310..6cc1a1655cf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -16,7 +16,7 @@ type ( // set into a wire representation. Encoder interface { // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. + // its Iterator. This result may be cached by an attribute.Set. Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go new file mode 100644 index 00000000000..6aa69aeaecf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" +) + +// Type identifiers. These identifiers are hashed before the value of the +// corresponding type. This is done to distinguish values that are hashed with +// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and +// int64(0)). +// +// These are all 8 byte length strings converted to a uint64 representation. A +// uint64 is used instead of the string directly as an optimization, it avoids +// the for loop in [xxhash] which adds minor overhead. +const ( + boolID uint64 = 7953749933313450591 // "_boolean" (little endian) + int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) + float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) + stringID uint64 = 6874584755375207263 // "_string_" (little endian) + boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) + int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) + float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) + stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) +) + +// hashKVs returns a new xxHash64 hash of kvs. 
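+//
+// Callers are expected to pass key-values that are already sorted and
+// de-duplicated, as newSet in set.go does. The hash is order sensitive, so
+// permutations of the same key-values produce different hashes.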
+func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. + h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. + v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 00000000000..113a978383b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. +func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. +} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. 
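+//
+// Sum64 does not reset the underlying digest, so further values may be
+// written to h afterwards. Hash holds a pointer to a single digest, meaning
+// the copies returned by the chaining methods all share the same state.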
+func (h Hash) Sum64() uint64 { + return h.d.Sum64() +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 64735d382ea..911d557ee54 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -9,6 +9,8 @@ import ( "reflect" "slices" "sort" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" ) type ( @@ -23,19 +25,19 @@ type ( // the Equals method to ensure stable equivalence checking. // // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. + // instead of a Set directly. Set has relatively poor performance when used + // as a map key compared to Distinct. Set struct { - equivalent Distinct + hash uint64 + data any } - // Distinct is a unique identifier of a Set. + // Distinct is an identifier of a Set which is very likely to be unique. // - // Distinct is designed to ensure equivalence stability: comparisons will - // return the same value across versions. For this reason, Distinct should - // always be used as a map key instead of a Set. + // Distinct should be used as a map key instead of a Set for to provide better + // performance for map operations. Distinct struct { - iface any + hash uint64 } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -46,15 +48,34 @@ type ( Sortable []KeyValue ) +// Compile time check these types remain comparable. +var ( + _ = isComparable(Set{}) + _ = isComparable(Distinct{}) +) + +func isComparable[T comparable](t T) T { return t } + var ( // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, + // emptyHash is the hash of an empty set. + emptyHash = xxhash.New().Sum64() + + // userDefinedEmptySet is an empty set. It was mistakenly exposed to users + // as something they can assign to, so it must remain addressable and + // mutable. + // + // This is kept for backwards compatibility, but should not be used in new code. + userDefinedEmptySet = &Set{ + hash: emptyHash, + data: [0]KeyValue{}, + } + + emptySet = Set{ + hash: emptyHash, + data: [0]KeyValue{}, } ) @@ -62,33 +83,35 @@ var ( // // This is a convenience provided for optimized calling utility. func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) + // Continue to return the pointer to the user-defined empty set for + // backwards-compatibility. + // + // New code should not use this, instead use emptySet. + return userDefinedEmptySet } // Valid reports whether this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil +func (d Distinct) Valid() bool { return d.hash != 0 } + +// reflectValue abbreviates reflect.ValueOf(d). +func (l Set) reflectValue() reflect.Value { + return reflect.ValueOf(l.data) } // Len returns the number of attributes in this set. func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return 0 } - return l.equivalent.reflectValue().Len() + return l.reflectValue().Len() } // Get returns the KeyValue at ordered position idx in this set. 
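+// It returns the zero KeyValue and false for a nil or zero-value Set, or
+// when idx is out of range.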
func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return KeyValue{}, false } - value := l.equivalent.reflectValue() + value := l.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return Value{}, false } - rValue := l.equivalent.reflectValue() + rValue := l.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue { return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } // Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). 
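+	// slice[0] already holds a value known to be filtered out, so only
+	// slice[1:first+1] needs to be partitioned; the +1 accounts for
+	// slice[0] in the returned division index.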
div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) any { +// computeDataFixed computes a Set data for small slices. It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) any { +// computeDataReflect computes a Set data using reflection, works for any size +// input. +func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b24776b..24f1fa37dbe 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f83a448ec61..78e98c4c0f3 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". @@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // A key can have no value, like: " key ". 
ok = true p.key = s[keyStart:keyEnd] - return + return p, ok } // If we have not reached the end and we can't find the '=' delimiter, // it means the property is invalid. if s[index] != keyValueDelimiter[0] { - return + return p, ok } // Attempting to parse the value. @@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // we have not reached the end, it means the property is // invalid, something like: " key = value value1". if index != len(s) { - return + return p, ok } // Decode a percent-encoded value. rawVal := s[valueStart:valueEnd] unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return + return p, ok } value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) @@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { p.hasValue = true p.value = value - return + return p, ok } func skipSpace(s string, offset int) int { diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index a311fbb4835..cadb87cc0ee 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver +FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 492480f8c9a..756bf7964c4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -16,7 +16,6 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" @@ -101,7 +100,7 @@ func (c *client) Shutdown(ctx context.Context) error { // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { +func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) (uploadErr error) { // The otlpmetric.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. 
@@ -116,7 +115,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou ctx, cancel := c.exportContext(ctx) defer cancel() - return c.requestFunc(ctx, func(iCtx context.Context) error { + return errors.Join(uploadErr, c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, }) @@ -124,8 +123,8 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedDataPoints() if n != 0 || msg != "" { - err := internal.MetricPartialSuccessError(n, msg) - otel.Handle(err) + e := internal.MetricPartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, e) } } // nil is converted to OK. @@ -134,7 +133,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return nil } return err - }) + })) } // exportContext returns a copy of parent with an appropriate deadline and diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go index b29cd11a660..f6e4f0d0764 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go @@ -20,7 +20,7 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/o //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go -//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go index b54a173b6ea..7f947273b5e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go @@ -18,7 +18,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. 
@@ -165,11 +164,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": - fn(cumulativeTemporality) + fn(metric.CumulativeTemporalitySelector) case "delta": - fn(deltaTemporality) + fn(metric.DeltaTemporalitySelector) case "lowmemory": - fn(lowMemory) + fn(metric.LowMemoryTemporalitySelector) default: global.Warn( "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", @@ -181,28 +180,6 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) } } -func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go index 6af5591ea7e..243abcb1718 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go @@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string { return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } +// As returns true if ps can be assigned to target and makes the assignment. +// Otherwise, it returns false. This supports the errors.As() interface. +func (ps PartialSuccess) As(target any) bool { + t, ok := target.(*PartialSuccess) + if !ok { + return false + } + *t = ps + return true +} + // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go index 80691ac3a9f..c14e5ae78a0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go @@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } + // Check if context is canceled before attempting to wait and retry. 
+ if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 7909cac56d9..1e2446fe483 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 26af47e621b..76381b56ee8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -22,7 +22,6 @@ import ( metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/protobuf/proto" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" @@ -122,7 +121,7 @@ func (c *client) Shutdown(ctx context.Context) error { // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { +func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) (uploadErr error) { // The otlpmetric.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. @@ -139,7 +138,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return err } - return c.requestFunc(ctx, func(iCtx context.Context) error { + return errors.Join(uploadErr, c.requestFunc(ctx, func(iCtx context.Context) error { select { case <-iCtx.Done(): return iCtx.Err() @@ -158,7 +157,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou if resp != nil && resp.Body != nil { defer func() { if err := resp.Body.Close(); err != nil { - otel.Handle(err) + uploadErr = errors.Join(uploadErr, err) } }() } @@ -186,7 +185,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou n := respProto.PartialSuccess.GetRejectedDataPoints() if n != 0 || msg != "" { err := internal.MetricPartialSuccessError(n, msg) - otel.Handle(err) + uploadErr = errors.Join(uploadErr, err) } } } @@ -219,7 +218,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou // Non-retryable failure. 
return fmt.Errorf("failed to send metrics to %s: %s (%w)", request.URL, resp.Status, bodyErr) } - }) + })) } var gzPool = sync.Pool{ diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go index 8849f341ada..a8b709268d1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go @@ -20,7 +20,7 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/o //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go -//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go index ef318ac676c..69224c97de8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go @@ -18,7 +18,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. 
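Both metric exporters now map OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE onto the temporality selectors exported by go.opentelemetry.io/otel/sdk/metric instead of keeping local copies (the gRPC hunk is above; the HTTP one follows below). A short sketch of expressing the same "delta" preference in code via the exporter's WithTemporalitySelector option rather than the environment variable:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	"go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Equivalent of OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta:
	// counters, histograms, and observable counters report delta
	// temporality; other instrument kinds stay cumulative.
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithTemporalitySelector(metric.DeltaTemporalitySelector),
	)
	if err != nil {
		log.Fatal(err)
	}

	provider := metric.NewMeterProvider(
		metric.WithReader(metric.NewPeriodicReader(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }() // also shuts down exp
}
```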
@@ -165,11 +164,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": - fn(cumulativeTemporality) + fn(metric.CumulativeTemporalitySelector) case "delta": - fn(deltaTemporality) + fn(metric.DeltaTemporalitySelector) case "lowmemory": - fn(lowMemory) + fn(metric.LowMemoryTemporalitySelector) default: global.Warn( "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", @@ -181,28 +180,6 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) } } -func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { - switch ik { - case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go index c3b57c57cfb..04190f5301e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go @@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string { return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } +// As returns true if ps can be assigned to target and makes the assignment. +// Otherwise, it returns false. This supports the errors.As() interface. +func (ps PartialSuccess) As(target any) bool { + t, ok := target.(*PartialSuccess) + if !ok { + return false + } + *t = ps + return true +} + // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go index 8a5fa80eac6..b4b5c013d4c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go @@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } + // Check if context is canceled before attempting to wait and retry. 
+ if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index b8fe7cb2901..8f6179b390b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 379bc8170df..d431fc4517d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -113,7 +113,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { if psid := sd.Parent().SpanID(); psid.IsValid() { s.ParentSpanId = psid[:] } - s.Flags = buildSpanFlags(sd.Parent()) + s.Flags = buildSpanFlagsWith(sd.SpanContext().TraceFlags(), sd.Parent()) return s } @@ -159,7 +159,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() - flags := buildSpanFlags(otLink.SpanContext) + flags := buildSpanFlagsWith(otLink.SpanContext.TraceFlags(), otLink.SpanContext) sl = append(sl, &tracepb.Span_Link{ TraceId: tid[:], @@ -172,13 +172,15 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { return sl } -func buildSpanFlags(sc trace.SpanContext) uint32 { - flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK - if sc.IsRemote() { - flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK +func buildSpanFlagsWith(tf trace.TraceFlags, parent trace.SpanContext) uint32 { + // Lower 8 bits are the W3C TraceFlags; always indicate that we know whether the parent is remote + flags := uint32(tf) | uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) + // Set the parent-is-remote bit when applicable + if parent.IsRemote() { + flags |= uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) } - return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative + return flags // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. 
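The buildSpanFlagsWith change above preserves the span's W3C trace flags in the low byte of the OTLP flags field instead of discarding them. A self-contained sketch of the resulting bit layout; the constants restate the tracepb.SpanFlags_* mask values defined by the OTLP protocol so the example compiles on its own:

```go
package main

import "fmt"

// OTLP span flags bit layout (mirrors the tracepb.SpanFlags_* masks).
const (
	traceFlagsMask  uint32 = 0x000000FF // low 8 bits: the W3C trace flags
	hasIsRemoteMask uint32 = 0x00000100 // the is-remote bit below is valid
	isRemoteMask    uint32 = 0x00000200 // the parent span context is remote
)

// buildFlags mirrors the updated buildSpanFlagsWith: keep the W3C trace
// flags, always mark has-is-remote as known, and set is-remote as needed.
func buildFlags(w3cTraceFlags byte, parentIsRemote bool) uint32 {
	flags := uint32(w3cTraceFlags) | hasIsRemoteMask
	if parentIsRemote {
		flags |= isRemoteMask
	}
	return flags
}

func main() {
	// A sampled span (trace flag 0x01) with a remote parent context.
	fmt.Printf("0x%03X\n", buildFlags(0x01, true)) // 0x301
}
```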
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 4b4cc76f4a9..76b7cd461bf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -17,9 +17,10 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" ) @@ -45,6 +46,9 @@ type client struct { conn *grpc.ClientConn tscMu sync.RWMutex tsc coltracepb.TraceServiceClient + + instID int64 + inst *observ.Instrumentation } // Compile time check *client implements otlptrace.Client. @@ -68,6 +72,7 @@ func newClient(opts ...Option) *client { stopCtx: ctx, stopFunc: cancel, conn: cfg.GRPCConn, + instID: counter.NextExporterID(), } if len(cfg.Traces.Headers) > 0 { @@ -92,13 +97,24 @@ func (c *client) Start(context.Context) error { c.conn = conn } + // Initialize the instrumentation if not already done. + // + // Initialize here instead of NewClient to allow any errors to be passed + // back to the caller and so that any setup of the environment variables to + // enable instrumentation can be set via code. + var err error + if c.inst == nil { + target := c.conn.CanonicalTarget() + c.inst, err = observ.NewInstrumentation(c.instID, target) + } + // The otlptrace.Client interface states this method is called just once, // so no need to check if already started. c.tscMu.Lock() c.tsc = coltracepb.NewTraceServiceClient(c.conn) c.tscMu.Unlock() - return nil + return err } var errAlreadyStopped = errors.New("the client is already stopped") @@ -174,7 +190,7 @@ var errShutdown = errors.New("the client is shutdown") // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. -func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) (uploadErr error) { // Hold a read lock to ensure a shut down initiated after this starts does // not abandon the export. This read lock acquire has less priority than a // write lock acquire (i.e. 
Stop), meaning if the client is shutting down @@ -189,6 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc ctx, cancel := c.exportContext(ctx) defer cancel() + var code codes.Code + if c.inst != nil { + op := c.inst.ExportSpans(ctx, len(protoSpans)) + defer func() { op.End(uploadErr, code) }() + } + return c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, @@ -197,16 +219,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { - err := internal.TracePartialSuccessError(n, msg) - otel.Handle(err) + e := internal.TracePartialSuccessError(n, msg) + uploadErr = errors.Join(uploadErr, e) } } // nil is converted to OK. - if status.Code(err) == codes.OK { + code = status.Code(err) + if code == codes.OK { // Success. - return nil + return uploadErr } - return err + return errors.Join(uploadErr, err) }) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go new file mode 100644 index 00000000000..323b2a2c9a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. 
+func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go index b6e6b10fbf1..7fe9c9f3a33 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -23,3 +23,12 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go + +//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\" }" --out=x/x.go +//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target.go.tmpl "--data={ \"pkg\": \"observ\", \"pkg_path\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ\" }" --out=observ/target.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target_test.go.tmpl "--data={ \"pkg\": \"observ\" }" --out=observ/target_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go new file mode 100644 index 00000000000..0dd54e4b969 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// otlptracegrpc exporter. 
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go new file mode 100644 index 00000000000..2257fcc865f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go @@ -0,0 +1,341 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/codes" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. + Version = internal.Version +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 + // server.addr + 1 + // server.port + 1 + // error.type + 1 // rpc.grpc.status_code + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the exporter with the +// provided ID. +func ComponentName(id int64) string { + t := semconv.OTelComponentTypeOtlpGRPCSpanExporter.Value.AsString() + return fmt.Sprintf("%s/%d", t, id) +} + +// Instrumentation is experimental instrumentation for the exporter. +type Instrumentation struct { + inflightSpans metric.Int64UpDownCounter + exportedSpans metric.Int64Counter + opDuration metric.Float64Histogram + + attrs []attribute.KeyValue + addOpt metric.AddOption + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for an OTLP over gRPC trace +// exporter with the provided ID using the global MeterProvider. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The target is the endpoint the exporter is exporting to. +// +// If the experimental observability is disabled, nil is returned.
+func NewInstrumentation(id int64, target string) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + attrs := BaseAttrs(id, target) + i := &Instrumentation{ + attrs: attrs, + addOpt: metric.WithAttributeSet(attribute.NewSet(attrs...)), + + // Do not modify attrs (NewSet sorts in-place), make a new slice. + recOpt: metric.WithAttributeSet(attribute.NewSet(append( + // Default to OK status code. + []attribute.KeyValue{semconv.RPCGRPCStatusCodeOk}, + attrs..., + )...)), + } + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(SchemaURL), + ) + + var err error + + inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m) + if e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + i.inflightSpans = inflightSpans.Inst() + + exportedSpans, e := otelconv.NewSDKExporterSpanExported(m) + if e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + i.exportedSpans = exportedSpans.Inst() + + opDuration, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.opDuration = opDuration.Inst() + + return i, err +} + +// BaseAttrs returns the base attributes for the exporter with the provided ID +// and target. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The target is the gRPC target the exporter is exporting to. It is expected +// to be the output of the Client's CanonicalTarget method. +func BaseAttrs(id int64, target string) []attribute.KeyValue { + host, port, err := ParseCanonicalTarget(target) + if err != nil || (host == "" && port < 0) { + if err != nil { + global.Debug("failed to parse target", "target", target, "error", err) + } + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + } + } + + // Do not use append so the slice is exactly allocated. + + if port < 0 { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + } + } + + if host == "" { + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerPort(port), + } + } + + return []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeOtlpGRPCSpanExporter, + semconv.ServerAddress(host), + semconv.ServerPort(port), + } +} + +// ExportSpans instruments the ExportSpans method of the exporter. It returns +// an [ExportOp] that must have its [ExportOp.End] method called when the +// ExportSpans method returns. +func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp { + start := time.Now() + + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.addOpt) + i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...) + + return ExportOp{ + ctx: ctx, + start: start, + nSpans: int64(nSpans), + inst: i, + } +} + +// ExportOp tracks the operation being observed by [Instrumentation.ExportSpans]. 
+type ExportOp struct { + ctx context.Context + start time.Time + nSpans int64 + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.ExportSpans]. +// +// Any error that is encountered is provided as err. +// +// If err is not nil, all spans will be recorded as failures unless error is of +// type [internal.PartialSuccess]. In the case of a PartialSuccess, the number +// of successfully exported spans will be determined by inspecting the +// RejectedItems field of the PartialSuccess. +func (e ExportOp) End(err error, code codes.Code) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, e.inst.addOpt) + + e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...) + + success := successful(e.nSpans, err) + // Record successfully exported spans, even if the value is 0 which are + // meaningful to distribution aggregations. + e.inst.exportedSpans.Add(e.ctx, success, *addOpt...) + + if err != nil { + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, e.inst.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + o := metric.WithAttributeSet(attribute.NewSet(*attrs...)) + // Reset addOpt with new attribute set. + *addOpt = append((*addOpt)[:0], o) + + e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...) + } + + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err, code)) + + d := time.Since(e.start).Seconds() + e.inst.opDuration.Record(e.ctx, d, *recOpt...) +} + +// recordOption returns a RecordOption with attributes representing the +// outcome of the operation being recorded. +// +// If err is nil and code is codes.OK, the default recOpt of the +// Instrumentation is returned. +// +// If err is not nil or code is not codes.OK, a new RecordOption is returned +// with the base attributes of the Instrumentation plus the rpc.grpc.status_code +// attribute set to the provided code, and if err is not nil, the error.type +// attribute set to the type of the error. +func (i *Instrumentation) recordOption(err error, code codes.Code) metric.RecordOption { + if err == nil && code == codes.OK { + return i.recOpt + } + + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, i.attrs...) + + c := int64(code) // uint32 -> int64. + *attrs = append(*attrs, semconv.RPCGRPCStatusCodeKey.Int64(c)) + if err != nil { + *attrs = append(*attrs, semconv.ErrorType(err)) + } + + // Do not inefficiently make a copy of attrs by using WithAttributes + // instead of WithAttributeSet. + return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} + +// successful returns the number of successfully exported spans out of the n +// that were exported based on the provided error. +// +// If err is nil, n is returned. All spans were successfully exported. +// +// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned. +// It is assumed all spans failed to be exported. +// +// If err is an [internal.PartialSuccess] error, the number of successfully +// exported spans is computed by subtracting the RejectedItems field from n. If +// RejectedItems is negative, n is returned. If RejectedItems is greater than +// n, 0 is returned. 
+func successful(n int64, err error) int64 { + if err == nil { + return n // All spans successfully exported. + } + // Split rejection calculation so successful is inlinable. + return n - rejected(n, err) +} + +var errPartialPool = &sync.Pool{ + New: func() any { return new(internal.PartialSuccess) }, +} + +// rejected returns how many of the n exported spans were rejected based on +// the provided non-nil err. +func rejected(n int64, err error) int64 { + ps := errPartialPool.Get().(*internal.PartialSuccess) + defer errPartialPool.Put(ps) + // Check for partial success. + if errors.As(err, ps) { + // Bound RejectedItems to [0, n]. This should not be needed, + // but be defensive as this is from an external source. + return min(max(ps.RejectedItems, 0), n) + } + return n // All spans rejected. +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go new file mode 100644 index 00000000000..34eee27db0d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go @@ -0,0 +1,143 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/observ/target.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ" + +import ( + "errors" + "fmt" + "net" + "net/netip" + "strconv" + "strings" +) + +const ( + schemeUnix = "unix" + schemeUnixAbstract = "unix-abstract" +) + +// ParseCanonicalTarget parses a target string and returns the extracted host +// (domain address or IP), the target port, or an error. +// +// If no port is specified, -1 is returned. +// +// If no host is specified, an empty string is returned. +// +// The target string is expected to always have the form +// "<scheme>://[authority]/<endpoint>". For example: +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" +// - "unix-abstract:///socket-name" +// - "passthrough:///192.34.2.1:42" +// +// The target is expected to come from the CanonicalTarget method of a gRPC +// Client. +func ParseCanonicalTarget(target string) (string, int, error) { + const sep = "://" + + // Find scheme. Do not allocate the string by using url.Parse. + idx := strings.Index(target, sep) + if idx == -1 { + return "", -1, fmt.Errorf("invalid target %q: missing scheme", target) + } + scheme, endpoint := target[:idx], target[idx+len(sep):] + + // Check for unix schemes. + if scheme == schemeUnix || scheme == schemeUnixAbstract { + return parseUnix(endpoint) + } + + // Strip leading slash and any authority. + if i := strings.Index(endpoint, "/"); i != -1 { + endpoint = endpoint[i+1:] + } + + // DNS, passthrough, and custom resolvers. + return parseEndpoint(endpoint) +} + +// parseUnix parses unix socket targets. +func parseUnix(endpoint string) (string, int, error) { + // Format: unix[-abstract]://path + // + // We should have "/path" (empty authority) if valid. + if len(endpoint) >= 1 && endpoint[0] == '/' { + // Return the full path including leading slash.
+ return endpoint, -1, nil + } + + // If there's no leading slash, it means there might be an authority. + // Check for authority case (should error): "authority/path" + if slashIdx := strings.Index(endpoint, "/"); slashIdx > 0 { + return "", -1, fmt.Errorf("invalid (non-empty) authority: %s", endpoint[:slashIdx]) + } + + return "", -1, errors.New("invalid unix target format") +} + +// parseEndpoint parses an endpoint from a gRPC target. +// +// It supports the following formats: +// - "host" +// - "host%zone" +// - "host:port" +// - "host%zone:port" +// - "ipv4" +// - "ipv4%zone" +// - "ipv4:port" +// - "ipv4%zone:port" +// - "ipv6" +// - "ipv6%zone" +// - "[ipv6]" +// - "[ipv6%zone]" +// - "[ipv6]:port" +// - "[ipv6%zone]:port" +// +// It returns the host or host%zone (domain address or IP), the port (or -1 if +// not specified), or an error if the input is not valid. +func parseEndpoint(endpoint string) (string, int, error) { + // First check if the endpoint is just an IP address. + if ip := parseIP(endpoint); ip != "" { + return ip, -1, nil + } + + // If there's no colon, there is no port (IPv6 with no port checked above). + if !strings.Contains(endpoint, ":") { + return endpoint, -1, nil + } + + host, portStr, err := net.SplitHostPort(endpoint) + if err != nil { + return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err) + } + + const base, bitSize = 10, 16 + port16, err := strconv.ParseUint(portStr, base, bitSize) + if err != nil { + return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err) + } + port := int(port16) // port is guaranteed to be in the range [0, 65535]. + + return host, port, nil +} + +// parseIP attempts to parse the entire endpoint as an IP address. +// It returns the normalized string form of the IP if successful, +// or an empty string if parsing fails. +func parseIP(ip string) string { + // Strip leading and trailing brackets for IPv6 addresses. + if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { + ip = ip[1 : len(ip)-1] + } + addr, err := netip.ParseAddr(ip) + if err != nil { + return "" + } + // Return the normalized string form of the IP. + return addr.String() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go index 1c465942336..a811a07b352 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string { return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } +// As returns true if ps can be assigned to target and makes the assignment. +// Otherwise, it returns false. This supports the errors.As() interface. +func (ps PartialSuccess) As(target any) bool { + t, ok := target.(*PartialSuccess) + if !ok { + return false + } + *t = ps + return true +} + // Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 259a898ae77..a7b8e81a786 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { return err } + // Check if context is canceled before attempting to wait and retry. + if ctx.Err() != nil { + return fmt.Errorf("%w: %w", ctx.Err(), err) + } + if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime { return fmt.Errorf("max retry time elapsed: %w", err) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go new file mode 100644 index 00000000000..e2d7cee1e17 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +// Version is the current release version of the OpenTelemetry OTLP gRPC trace +// exporter in use. +const Version = "1.39.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md new file mode 100644 index 00000000000..15a3011bff3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `otlptracegrpc` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `otlptracegrpc` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards-incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Observability](#observability) + +### Observability + +The `otlptracegrpc` exporter provides an observability feature that allows you to monitor the SDK itself. + +To opt in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.37.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions.
+ +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go new file mode 100644 index 00000000000..4e89c6524f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go similarity index 55% rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go index 2fcbbcc66ec..741ba62c97f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go @@ -1,45 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. -package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x" import ( "os" - "strings" ) -// SelfObservability is an experimental feature flag that determines if SDK -// self-observability metrics are enabled. -// -// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. 
type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -49,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index c7b1a551498..d0688c2016f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -22,9 +22,10 @@ import ( tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" ) @@ -63,6 +64,9 @@ type client struct { client *http.Client stopCh chan struct{} stopOnce sync.Once + + instID int64 + inst *observ.Instrumentation } var _ otlptrace.Client = (*client)(nil) @@ -100,18 +104,29 @@ func NewClient(opts ...Option) otlptrace.Client { requestFunc: cfg.RetryConfig.RequestFunc(evaluate), stopCh: stopCh, client: httpClient, + instID: counter.NextExporterID(), } } // Start does nothing in a HTTP client. -func (*client) Start(ctx context.Context) error { +func (c *client) Start(ctx context.Context) error { + // Initialize the instrumentation if not already done. + // + // Initialize here instead of NewClient to allow any errors to be passed + // back to the caller and so that any setup of the environment variables to + // enable instrumentation can be set via code. + var err error + if c.inst == nil { + c.inst, err = observ.NewInstrumentation(c.instID, c.cfg.Endpoint) + } + // nothing to do select { case <-ctx.Done(): - return ctx.Err() + err = errors.Join(err, ctx.Err()) default: } - return nil + return err } // Stop shuts down the client and interrupt any in-flight request. 
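With the generalized x package above, a feature flag can now answer to several OTEL_GO_X_* keys, and the exporter metrics described in the README earlier are gated on OTEL_GO_X_OBSERVABILITY. A minimal sketch of opting in programmatically before the client starts; this in-process setup is why both Start methods initialize the instrumentation there instead of in the constructor:

```go
package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Opt in to the experimental exporter metrics. Start reads this flag,
	// so setting it before the exporter is created is sufficient.
	if err := os.Setenv("OTEL_GO_X_OBSERVABILITY", "true"); err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	exp, err := otlptracegrpc.New(ctx) // New starts the underlying client
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```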
@@ -128,7 +143,7 @@ func (d *client) Stop(ctx context.Context) error { } // UploadTraces sends a batch of spans to the collector. -func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { +func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) (uploadErr error) { pbRequest := &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, } @@ -145,7 +160,13 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc return err } - return d.requestFunc(ctx, func(ctx context.Context) error { + var statusCode int + if d.inst != nil { + op := d.inst.ExportSpans(ctx, len(protoSpans)) + defer func() { op.End(uploadErr, statusCode) }() + } + + return errors.Join(uploadErr, d.requestFunc(ctx, func(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() @@ -165,12 +186,13 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc if resp != nil && resp.Body != nil { defer func() { if err := resp.Body.Close(); err != nil { - otel.Handle(err) + uploadErr = errors.Join(uploadErr, err) } }() } - if sc := resp.StatusCode; sc >= 200 && sc <= 299 { + statusCode = resp.StatusCode + if statusCode >= 200 && statusCode <= 299 { // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer @@ -192,7 +214,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc n := respProto.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { err := internal.TracePartialSuccessError(n, msg) - otel.Handle(err) + uploadErr = errors.Join(uploadErr, err) } } } @@ -214,7 +236,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc } bodyErr := fmt.Errorf("body: %s", respStr) - switch resp.StatusCode { + switch statusCode { case http.StatusTooManyRequests, http.StatusBadGateway, http.StatusServiceUnavailable, @@ -225,7 +247,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc // Non-retryable failure. return fmt.Errorf("failed to send to %s: %s (%w)", request.URL, resp.Status, bodyErr) } - }) + })) } func (d *client) newRequest(body []byte) (request, error) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter/counter.go new file mode 100644 index 00000000000..75e5c0c7ed3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. 
+// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go index 3d344dc85ee..ca387432fd5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go @@ -23,3 +23,9 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go + +//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp\" }" --out=x/x.go + +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go +//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go new file mode 100644 index 00000000000..326ef0f8799 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ/instrumentation.go @@ -0,0 +1,397 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// otlptracehttp exporter. +package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ" + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "strconv" + "strings" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. 
+ Version = internal.Version +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 + // server.addr + 1 + // server.port + 1 + // error.type + 1 // http.response.status_code + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the exporter with the +// provided ID. +func ComponentName(id int64) string { + t := semconv.OTelComponentTypeOtlpHTTPSpanExporter.Value.AsString() + return fmt.Sprintf("%s/%d", t, id) +} + +// Instrumentation is experimental instrumentation for the exporter. +type Instrumentation struct { + inflightSpans metric.Int64UpDownCounter + exportedSpans metric.Int64Counter + opDuration metric.Float64Histogram + + attrs []attribute.KeyValue + addOpt metric.AddOption + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for an OTLP over HTTP trace +// exporter with the provided ID and endpoint. It uses the global +// MeterProvider to create the instrumentation. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The endpoint is the HTTP endpoint the exporter is exporting to. +// +// If the experimental observability is disabled, nil is returned. +func NewInstrumentation(id int64, endpoint string) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + attrs := BaseAttrs(id, endpoint) + i := &Instrumentation{ + attrs: attrs, + addOpt: metric.WithAttributeSet(attribute.NewSet(attrs...)), + + // Do not modify attrs (NewSet sorts in-place), make a new slice. + recOpt: metric.WithAttributeSet(attribute.NewSet(append( + // Default to OK status code (200). + []attribute.KeyValue{semconv.HTTPResponseStatusCode(http.StatusOK)}, + attrs..., + )...)), + } + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(SchemaURL), + ) + + var err error + + inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m) + if e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + i.inflightSpans = inflightSpans.Inst() + + exportedSpans, e := otelconv.NewSDKExporterSpanExported(m) + if e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + i.exportedSpans = exportedSpans.Inst() + + opDuration, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.opDuration = opDuration.Inst() + + return i, err +} + +// BaseAttrs returns the base attributes for the exporter with the provided ID +// and endpoint. +// +// The id should be the unique exporter instance ID. It is used +// to set the "component.name" attribute. +// +// The endpoint is the HTTP endpoint the exporter is exporting to. 
It should be
+// in the format "host:port" or a full URL.
+func BaseAttrs(id int64, endpoint string) []attribute.KeyValue {
+	host, port, err := parseEndpoint(endpoint)
+	if err != nil || (host == "" && port < 0) {
+		if err != nil {
+			global.Debug("failed to parse endpoint", "endpoint", endpoint, "error", err)
+		}
+		return []attribute.KeyValue{
+			semconv.OTelComponentName(ComponentName(id)),
+			semconv.OTelComponentTypeOtlpHTTPSpanExporter,
+		}
+	}
+
+	// Do not use append so the slice is exactly allocated.
+
+	if port < 0 {
+		return []attribute.KeyValue{
+			semconv.OTelComponentName(ComponentName(id)),
+			semconv.OTelComponentTypeOtlpHTTPSpanExporter,
+			semconv.ServerAddress(host),
+		}
+	}
+
+	if host == "" {
+		return []attribute.KeyValue{
+			semconv.OTelComponentName(ComponentName(id)),
+			semconv.OTelComponentTypeOtlpHTTPSpanExporter,
+			semconv.ServerPort(port),
+		}
+	}
+
+	return []attribute.KeyValue{
+		semconv.OTelComponentName(ComponentName(id)),
+		semconv.OTelComponentTypeOtlpHTTPSpanExporter,
+		semconv.ServerAddress(host),
+		semconv.ServerPort(port),
+	}
+}
+
+// parseEndpoint parses the host and port from an endpoint that has the form
+// "host[:port]", or it returns an error if the endpoint is not parsable.
+//
+// If no port is specified, -1 is returned.
+//
+// If no host is specified, an empty string is returned.
+func parseEndpoint(endpoint string) (string, int, error) {
+	// First check if the endpoint is just an IP address.
+	if ip := parseIP(endpoint); ip != "" {
+		return ip, -1, nil
+	}
+
+	// If there's no colon, there is no port (IPv6 with no port is checked above).
+	if !strings.Contains(endpoint, ":") {
+		return endpoint, -1, nil
+	}
+
+	// Otherwise, parse as host:port.
+	host, portStr, err := net.SplitHostPort(endpoint)
+	if err != nil {
+		return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err)
+	}
+
+	const base, bitSize = 10, 16
+	port16, err := strconv.ParseUint(portStr, base, bitSize)
+	if err != nil {
+		return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err)
+	}
+	port := int(port16) // port is guaranteed to be in the range [0, 65535].
+
+	return host, port, nil
+}
+
+// parseIP attempts to parse the entire endpoint as an IP address.
+// It returns the normalized string form of the IP if successful,
+// or an empty string if parsing fails.
+func parseIP(ip string) string {
+	// Strip leading and trailing brackets from IPv6 addresses.
+	if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
+		ip = ip[1 : len(ip)-1]
+	}
+	addr, err := netip.ParseAddr(ip)
+	if err != nil {
+		return ""
+	}
+	// Return the normalized string form of the IP.
+	return addr.String()
+}
+
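+// An illustrative trace of parseEndpoint's contract on a few sample inputs
+// (the expected results follow from the rules above; not exhaustive):
+//
+//	parseEndpoint("localhost:4318") // "localhost", 4318, nil
+//	parseEndpoint("collector")      // "collector", -1, nil
+//	parseEndpoint("[::1]")          // "::1", -1, nil (brackets stripped, IP normalized)
+//	parseEndpoint("host:99999")     // "", -1, error (port outside [0, 65535])
+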
+// ExportSpans instruments the UploadTraces method of the client. It returns
+// an [ExportOp] that must have its [ExportOp.End] method called when the
+// operation ends.
+func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp {
+	start := time.Now()
+
+	addOpt := get[metric.AddOption](addOptPool)
+	defer put(addOptPool, addOpt)
+	*addOpt = append(*addOpt, i.addOpt)
+	i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...)
+
+	return ExportOp{
+		ctx:    ctx,
+		start:  start,
+		nSpans: int64(nSpans),
+		inst:   i,
+	}
+}
+
+// ExportOp tracks the export operation being observed by
+// [Instrumentation.ExportSpans].
+type ExportOp struct {
+	ctx    context.Context
+	start  time.Time
+	nSpans int64
+
+	inst *Instrumentation
+}
+
+// End completes the observation of the operation being observed by a call to
+// [Instrumentation.ExportSpans].
+//
+// Any error that is encountered is provided as err.
+// The HTTP status code from the response is provided as status.
+//
+// If err is not nil, all spans will be recorded as failures unless the error
+// is of type [internal.PartialSuccess]. In the case of a PartialSuccess, the
+// number of successfully exported spans will be determined by inspecting the
+// RejectedItems field of the PartialSuccess.
+func (e ExportOp) End(err error, status int) {
+	addOpt := get[metric.AddOption](addOptPool)
+	defer put(addOptPool, addOpt)
+	*addOpt = append(*addOpt, e.inst.addOpt)
+
+	e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...)
+
+	success := successful(e.nSpans, err)
+	// Record successfully exported spans, even if the value is 0; zero
+	// values are meaningful to distribution aggregations.
+	e.inst.exportedSpans.Add(e.ctx, success, *addOpt...)
+
+	if err != nil {
+		attrs := get[attribute.KeyValue](measureAttrsPool)
+		defer put(measureAttrsPool, attrs)
+		*attrs = append(*attrs, e.inst.attrs...)
+		*attrs = append(*attrs, semconv.ErrorType(err))
+
+		// Use WithAttributeSet instead of WithAttributes to avoid an
+		// inefficient copy of attrs.
+		o := metric.WithAttributeSet(attribute.NewSet(*attrs...))
+		// Reset addOpt with the new attribute set.
+		*addOpt = append((*addOpt)[:0], o)
+
+		e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...)
+	}
+
+	recOpt := get[metric.RecordOption](recordOptPool)
+	defer put(recordOptPool, recOpt)
+	*recOpt = append(*recOpt, e.inst.recordOption(err, status))
+
+	d := time.Since(e.start).Seconds()
+	e.inst.opDuration.Record(e.ctx, d, *recOpt...)
+}
+
+// recordOption returns a RecordOption with attributes representing the
+// outcome of the operation being recorded.
+//
+// If err is nil and status is 200, the default recOpt of the
+// Instrumentation is returned.
+//
+// Otherwise, a new RecordOption is returned with the base attributes of the
+// Instrumentation plus the http.response.status_code attribute set to the
+// provided status, and, if err is not nil, the error.type attribute set
+// to the type of the error.
+func (i *Instrumentation) recordOption(err error, status int) metric.RecordOption {
+	if err == nil && status == http.StatusOK {
+		return i.recOpt
+	}
+
+	attrs := get[attribute.KeyValue](measureAttrsPool)
+	defer put(measureAttrsPool, attrs)
+	*attrs = append(*attrs, i.attrs...)
+
+	*attrs = append(*attrs, semconv.HTTPResponseStatusCode(status))
+	if err != nil {
+		*attrs = append(*attrs, semconv.ErrorType(err))
+	}
+
+	// Use WithAttributeSet instead of WithAttributes to avoid an
+	// inefficient copy of attrs.
+	return metric.WithAttributeSet(attribute.NewSet(*attrs...))
+}
+
+// successful returns the number of successfully exported spans out of the n
+// that were exported, based on the provided error.
+//
+// If err is nil, n is returned: all spans were successfully exported.
+//
+// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned.
+// It is assumed all spans failed to be exported.
+//
+// If err is an [internal.PartialSuccess] error, the number of successfully
+// exported spans is computed by subtracting the RejectedItems field from n. If
+// RejectedItems is negative, n is returned. If RejectedItems is greater than
+// n, 0 is returned.
+func successful(n int64, err error) int64 {
+	if err == nil {
+		return n // All spans successfully exported.
+	}
+	// Split the rejection calculation into rejected so successful is inlinable.
+	return n - rejected(n, err)
+}
+
+var errPartialPool = &sync.Pool{
+	New: func() any { return new(internal.PartialSuccess) },
+}
+
+// rejected returns how many of the n exported spans were rejected, based on
+// the provided non-nil err.
+func rejected(n int64, err error) int64 {
+	ps := errPartialPool.Get().(*internal.PartialSuccess)
+	defer errPartialPool.Put(ps)
+	// Check for partial success.
+	if errors.As(err, ps) {
+		// Bound RejectedItems to [0, n]. This should not be needed,
+		// but be defensive as this is from an external source.
+		return min(max(ps.RejectedItems, 0), n)
+	}
+	return n // All spans rejected.
+}
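// A self-contained sketch of the extraction pattern rejected above relies
// on: errors.As pulls a wrapped PartialSuccess out of the error chain, and
// its RejectedItems field bounds the failure count. The local type mirrors
// the internal PartialSuccess from partialsuccess.go (only the fields used
// here are included).
package main

import (
	"errors"
	"fmt"
)

type PartialSuccess struct {
	RejectedItems int64
	RejectedKind  string
}

func (ps PartialSuccess) Error() string {
	return fmt.Sprintf("OTLP partial success (%d %s rejected)", ps.RejectedItems, ps.RejectedKind)
}

// As assigns ps to a *PartialSuccess target, mirroring the As method added
// in this diff so errors.As support is explicit.
func (ps PartialSuccess) As(target any) bool {
	t, ok := target.(*PartialSuccess)
	if !ok {
		return false
	}
	*t = ps
	return true
}

func rejected(n int64, err error) int64 {
	var ps PartialSuccess
	if errors.As(err, &ps) {
		// Bound RejectedItems to [0, n]; the value comes from an
		// external source.
		return min(max(ps.RejectedItems, 0), n)
	}
	return n // Not a partial success: treat all n spans as rejected.
}

func main() {
	err := fmt.Errorf("traces export: %w", PartialSuccess{RejectedItems: 3, RejectedKind: "spans"})
	fmt.Println(rejected(10, err))                       // 3
	fmt.Println(rejected(10, errors.New("send failed"))) // 10
}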
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
index 418e66428ae..33ff6c02438 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go
@@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string {
 	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
 }
 
+// As returns true if ps can be assigned to target and makes the assignment.
+// Otherwise, it returns false. This supports the errors.As() interface.
+func (ps PartialSuccess) As(target any) bool {
+	t, ok := target.(*PartialSuccess)
+	if !ok {
+		return false
+	}
+	*t = ps
+	return true
+}
+
 // Is supports the errors.Is() interface.
 func (ps PartialSuccess) Is(err error) bool {
 	_, ok := err.(PartialSuccess)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
index 107428fa6cf..bf39808254f 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go
@@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
 				return err
 			}
 
+			// Check if context is canceled before attempting to wait and retry.
+			if ctx.Err() != nil {
+				return fmt.Errorf("%w: %w", ctx.Err(), err)
+			}
+
 			if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
 				return fmt.Errorf("max retry time elapsed: %w", err)
 			}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go
new file mode 100644
index 00000000000..cbd6e63230b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/version.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal"
+
+// Version is the current release version of the OpenTelemetry OTLP HTTP trace
+// exporter in use.
+const Version = "1.39.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/observ.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/observ.go new file mode 100644 index 00000000000..ecae4a79cac --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/observ.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/x.go new file mode 100644 index 00000000000..099dab00710 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x/x.go @@ -0,0 +1,58 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. +package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x" + +import ( + "os" +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + keys []string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } + return Feature[T]{ + keys: keys, + parse: parse, + } +} + +// Keys returns the environment variable keys that can be set to enable the +// feature. +func (f Feature[T]) Keys() []string { return f.keys } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } + } + return v, ok +} + +// Enabled reports whether the feature is enabled. 
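+//
+// A sketch of typical use with the Observability flag above (values are
+// matched case-insensitively, and an empty variable is treated as unset):
+//
+//	// With OTEL_GO_X_OBSERVABILITY=TRUE in the environment:
+//	if Observability.Enabled() {
+//		// Construct the experimental instrumentation.
+//	}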
+func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 3b79c1a0b5c..6838f3c4e3f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index dc3542637be..5fe28b93dfc 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -4,14 +4,10 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( - "sync" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" "github.com/prometheus/otlptranslator" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" ) @@ -28,12 +24,6 @@ type config struct { resourceAttributesFilter attribute.Filter } -var logTemporaryDefault = sync.OnceFunc(func() { - global.Warn( - "The default Prometheus naming translation strategy is planned to be changed from otlptranslator.NoUTF8EscapingWithSuffixes to otlptranslator.UnderscoreEscapingWithSuffixes in a future release. Add prometheus.WithTranslationStrategy(otlptranslator.NoUTF8EscapingWithSuffixes) to preserve the existing behavior, or prometheus.WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes) to opt into the future default behavior.", - ) -}) - // newConfig creates a validated config configured with options. func newConfig(opts ...Option) config { cfg := config{} @@ -42,27 +32,15 @@ func newConfig(opts ...Option) config { } if cfg.translationStrategy == "" { - // If no translation strategy was specified, deduce one based on the global - // NameValidationScheme. NOTE: this logic will change in the future, always - // defaulting to UnderscoreEscapingWithSuffixes - - //nolint:staticcheck // NameValidationScheme is deprecated but we still need it for now. - if model.NameValidationScheme == model.UTF8Validation { - logTemporaryDefault() - cfg.translationStrategy = otlptranslator.NoUTF8EscapingWithSuffixes - } else { - cfg.translationStrategy = otlptranslator.UnderscoreEscapingWithSuffixes - } - } else { + cfg.translationStrategy = otlptranslator.UnderscoreEscapingWithSuffixes + } else if !cfg.translationStrategy.ShouldAddSuffixes() { // Note, if the translation strategy implies that suffixes should be added, // the user can still use WithoutUnits and WithoutCounterSuffixes to // explicitly disable specific suffixes. We do not override their preference // in this case. However if the chosen strategy disables suffixes, we should // forcibly disable all of them. 
- if !cfg.translationStrategy.ShouldAddSuffixes() { - cfg.withoutCounterSuffixes = true - cfg.withoutUnits = true - } + cfg.withoutCounterSuffixes = true + cfg.withoutUnits = true } if cfg.registerer == nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/errors.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/errors.go new file mode 100644 index 00000000000..f2076de761e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/errors.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" + +import "errors" + +// Sentinel errors for consistent error checks in tests. +var ( + errInvalidMetricType = errors.New("invalid metric type") + errInvalidMetric = errors.New("invalid metric") + errEHScaleBelowMin = errors.New("exponential histogram scale below minimum supported") +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index 0f29c0abbde..d73786b83ed 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -20,6 +20,8 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/prometheus/internal/counter" + "go.opentelemetry.io/otel/exporters/prometheus/internal/observ" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" @@ -92,6 +94,8 @@ type collector struct { metricNamer otlptranslator.MetricNamer labelNamer otlptranslator.LabelNamer unitNamer otlptranslator.UnitNamer + + inst *observ.Instrumentation } // New returns a Prometheus Exporter. @@ -137,7 +141,10 @@ func New(opts ...Option) (*Exporter, error) { Reader: reader, } - return e, nil + var err error + collector.inst, err = observ.NewInstrumentation(counter.NextExporterID()) + + return e, err } // Describe implements prometheus.Collector. @@ -153,9 +160,26 @@ func (*collector) Describe(chan<- *prometheus.Desc) { // // This method is safe to call concurrently. func (c *collector) Collect(ch chan<- prometheus.Metric) { + var err error + // Blocked by this issue: Propagate context.Context through Gather and Collect (#1538) + // https://github.com/prometheus/client_golang/issues/1538. + ctx := context.TODO() + + if c.inst != nil { + timer := c.inst.RecordOperationDuration(ctx) + defer func() { timer.Stop(err) }() + } + metrics := metricsPool.Get().(*metricdata.ResourceMetrics) defer metricsPool.Put(metrics) - err := c.reader.Collect(context.TODO(), metrics) + + endCollection := func(error) {} + if c.inst != nil { + endCollection = c.inst.RecordCollectionDuration(ctx).Stop + } + err = c.reader.Collect(ctx, metrics) + endCollection(err) + if err != nil { if errors.Is(err, metric.ErrReaderShutdown) { return @@ -174,15 +198,16 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { - targetInfo, err := c.createInfoMetric( + targetInfo, e := c.createInfoMetric( otlptranslator.TargetInfoMetricName, targetInfoDescription, metrics.Resource, ) - if err != nil { + if e != nil { // If the target info metric is invalid, disable sending it. 
c.disableTargetInfo = true - otel.Handle(err) + otel.Handle(e) + err = errors.Join(err, fmt.Errorf("failed to createInfoMetric: %w", e)) return } @@ -195,14 +220,15 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 { - err := c.createResourceAttributes(metrics.Resource) - if err != nil { - otel.Handle(err) + e := c.createResourceAttributes(metrics.Resource) + if e != nil { + otel.Handle(e) + err = errors.Join(err, fmt.Errorf("failed to createResourceAttributes: %w", e)) return } } - for _, scopeMetrics := range metrics.ScopeMetrics { + for j, scopeMetrics := range metrics.ScopeMetrics { n := len(c.resourceKeyVals.keys) + 2 // resource attrs + scope name + scope version kv := keyVals{ keys: make([]string, 0, n), @@ -213,9 +239,10 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) - attrKeys, attrVals, err := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer) - if err != nil { - otel.Handle(err) + attrKeys, attrVals, e := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer) + if e != nil { + reportError(ch, nil, e) + err = errors.Join(err, fmt.Errorf("failed to getAttrs for ScopeMetrics %d: %w", j, e)) continue } for i := range attrKeys { @@ -228,21 +255,22 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { kv.keys = append(kv.keys, c.resourceKeyVals.keys...) kv.vals = append(kv.vals, c.resourceKeyVals.vals...) - for _, m := range scopeMetrics.Metrics { + for k, m := range scopeMetrics.Metrics { typ := c.metricType(m) if typ == nil { + reportError(ch, nil, errInvalidMetricType) continue } - name, err := c.getName(m) - if err != nil { - // TODO(#7066): Handle this error better. It's not clear this can be - // reached, bad metric names should / will be caught at creation time. 
-			otel.Handle(err)
+			name, e := c.getName(m)
+			if e != nil {
+				reportError(ch, nil, e)
+				err = errors.Join(err, fmt.Errorf("failed to getName for ScopeMetrics %d, Metrics %d: %w", j, k, e))
 				continue
 			}
 
 			drop, help := c.validateMetrics(name, m.Description, typ)
 			if drop {
+				reportError(ch, nil, errInvalidMetric)
 				continue
 			}
 
@@ -252,21 +280,21 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 
 			switch v := m.Data.(type) {
 			case metricdata.Histogram[int64]:
-				addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+				addHistogramMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.Histogram[float64]:
-				addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+				addHistogramMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.ExponentialHistogram[int64]:
-				addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+				addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.ExponentialHistogram[float64]:
-				addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+				addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.Sum[int64]:
-				addSumMetric(ch, v, m, name, kv, c.labelNamer)
+				addSumMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.Sum[float64]:
-				addSumMetric(ch, v, m, name, kv, c.labelNamer)
+				addSumMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.Gauge[int64]:
-				addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
+				addGaugeMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			case metricdata.Gauge[float64]:
-				addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
+				addGaugeMetric(ch, v, m, name, kv, c.labelNamer, c.inst, ctx)
 			}
 		}
 	}
@@ -332,11 +360,21 @@ func addExponentialHistogramMetric[N int64 | float64](
 	name string,
 	kv keyVals,
 	labelNamer otlptranslator.LabelNamer,
+	inst *observ.Instrumentation,
+	ctx context.Context,
 ) {
-	for _, dp := range histogram.DataPoints {
-		keys, values, err := getAttrs(dp.Attributes, labelNamer)
-		if err != nil {
-			otel.Handle(err)
+	var err error
+	var success int64
+	if inst != nil {
+		op := inst.ExportMetrics(ctx, int64(len(histogram.DataPoints)))
+		defer func() { op.End(success, err) }()
+	}
+
+	for j, dp := range histogram.DataPoints {
+		keys, values, e := getAttrs(dp.Attributes, labelNamer)
+		if e != nil {
+			reportError(ch, nil, e)
+			err = errors.Join(err, fmt.Errorf("failed to getAttrs for histogram.DataPoints %d: %w", j, e))
 			continue
 		}
 		keys = append(keys, kv.keys...)
@@ -348,9 +386,12 @@ func addExponentialHistogramMetric[N int64 | float64](
 		scale := dp.Scale
 		if scale < -4 {
 			// Reject scales below -4 as they cannot be represented in Prometheus
-			otel.Handle(fmt.Errorf(
-				"exponential histogram scale %d is below minimum supported scale -4, skipping data point",
-				scale))
+			e = fmt.Errorf("%w: %d (min -4)", errEHScaleBelowMin, scale)
+			reportError(ch, desc, e)
+			err = errors.Join(err, e)
 			continue
 		}
 
@@ -368,7 +409,9 @@ func addExponentialHistogramMetric[N int64 | float64](
 		positiveBuckets := make(map[int]int64)
 		for i, c := range positiveBucket.Counts {
 			if c > math.MaxInt64 {
-				otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c))
+				e := fmt.Errorf("positive count %d is too large to be represented as int64", c)
+				otel.Handle(e)
+				err = errors.Join(err, e)
 				continue
 			}
 			positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec  // Size check above.
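// A runnable sketch of the error-reporting pattern introduced in this
// exporter.go diff: failures are wrapped around the errors.go sentinels with
// %w (keeping errors.Is matchable) and pushed onto the collect channel as
// invalid metrics via reportError, so Prometheus surfaces them at scrape
// time. The sentinel and helper below mirror the ones in this diff.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var errEHScaleBelowMin = errors.New("exponential histogram scale below minimum supported")

func reportError(ch chan<- prometheus.Metric, desc *prometheus.Desc, err error) {
	if desc == nil {
		desc = prometheus.NewInvalidDesc(err)
	}
	ch <- prometheus.NewInvalidMetric(desc, err)
}

func main() {
	ch := make(chan prometheus.Metric, 1)

	scale := -5
	err := fmt.Errorf("%w: %d (min -4)", errEHScaleBelowMin, scale)
	fmt.Println(errors.Is(err, errEHScaleBelowMin)) // true

	reportError(ch, nil, err)
	m := <-ch
	// The invalid metric carries the error; a registry Gather call would
	// report it to the scraper.
	fmt.Println(m.Desc())
}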
@@ -377,13 +420,15 @@ func addExponentialHistogramMetric[N int64 | float64](
 		negativeBuckets := make(map[int]int64)
 		for i, c := range negativeBucket.Counts {
 			if c > math.MaxInt64 {
-				otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c))
+				e := fmt.Errorf("negative count %d is too large to be represented as int64", c)
+				otel.Handle(e)
+				err = errors.Join(err, e)
 				continue
 			}
 			negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec  // Size check above.
 		}
 
-		m, err := prometheus.NewConstNativeHistogram(
+		m, e := prometheus.NewConstNativeHistogram(
 			desc,
 			dp.Count,
 			float64(dp.Sum),
@@ -394,12 +439,18 @@ func addExponentialHistogramMetric[N int64 | float64](
 			dp.ZeroThreshold,
 			dp.StartTime,
 			values...)
-		if err != nil {
-			otel.Handle(err)
+		if e != nil {
+			reportError(ch, desc, e)
+			err = errors.Join(
+				err,
+				fmt.Errorf("failed to NewConstNativeHistogram for histogram.DataPoints %d: %w", j, e),
+			)
 			continue
 		}
 		m = addExemplars(m, dp.Exemplars, labelNamer)
 		ch <- m
+
+		success++
 	}
 }
 
@@ -410,11 +461,21 @@ func addHistogramMetric[N int64 | float64](
 	name string,
 	kv keyVals,
 	labelNamer otlptranslator.LabelNamer,
+	inst *observ.Instrumentation,
+	ctx context.Context,
 ) {
-	for _, dp := range histogram.DataPoints {
-		keys, values, err := getAttrs(dp.Attributes, labelNamer)
-		if err != nil {
-			otel.Handle(err)
+	var err error
+	var success int64
+	if inst != nil {
+		op := inst.ExportMetrics(ctx, int64(len(histogram.DataPoints)))
+		defer func() { op.End(success, err) }()
+	}
+
+	for j, dp := range histogram.DataPoints {
+		keys, values, e := getAttrs(dp.Attributes, labelNamer)
+		if e != nil {
+			reportError(ch, nil, e)
+			err = errors.Join(err, fmt.Errorf("failed to getAttrs for histogram.DataPoints %d: %w", j, e))
 			continue
 		}
 		keys = append(keys, kv.keys...)
@@ -428,13 +489,16 @@ func addHistogramMetric[N int64 | float64](
 			cumulativeCount += dp.BucketCounts[i]
 			buckets[bound] = cumulativeCount
 		}
-		m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...)
-		if err != nil {
-			otel.Handle(err)
+		m, e := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...)
+		if e != nil {
+			reportError(ch, desc, e)
+			err = errors.Join(err, fmt.Errorf("failed to NewConstHistogram for histogram.DataPoints %d: %w", j, e))
 			continue
 		}
 		m = addExemplars(m, dp.Exemplars, labelNamer)
 		ch <- m
+
+		success++
 	}
 }
 
@@ -445,25 +509,36 @@ func addSumMetric[N int64 | float64](
 	name string,
 	kv keyVals,
 	labelNamer otlptranslator.LabelNamer,
+	inst *observ.Instrumentation,
+	ctx context.Context,
 ) {
+	var err error
+	var success int64
+	if inst != nil {
+		op := inst.ExportMetrics(ctx, int64(len(sum.DataPoints)))
+		defer func() { op.End(success, err) }()
+	}
+
 	valueType := prometheus.CounterValue
 	if !sum.IsMonotonic {
 		valueType = prometheus.GaugeValue
 	}
 
-	for _, dp := range sum.DataPoints {
-		keys, values, err := getAttrs(dp.Attributes, labelNamer)
-		if err != nil {
-			otel.Handle(err)
+	for i, dp := range sum.DataPoints {
+		keys, values, e := getAttrs(dp.Attributes, labelNamer)
+		if e != nil {
+			reportError(ch, nil, e)
+			err = errors.Join(err, fmt.Errorf("failed to getAttrs for sum.DataPoints %d: %w", i, e))
 			continue
 		}
 		keys = append(keys, kv.keys...)
 		values = append(values, kv.vals...)
 
 		desc := prometheus.NewDesc(name, m.Description, keys, nil)
-		m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
-		if err != nil {
-			otel.Handle(err)
+		m, e := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
+ if e != nil { + reportError(ch, desc, e) + err = errors.Join(err, fmt.Errorf("failed to NewConstMetric for sum.DataPoints %d: %w", i, e)) continue } // GaugeValues don't support Exemplars at this time @@ -472,6 +547,8 @@ func addSumMetric[N int64 | float64]( m = addExemplars(m, dp.Exemplars, labelNamer) } ch <- m + + success++ } } @@ -482,23 +559,36 @@ func addGaugeMetric[N int64 | float64]( name string, kv keyVals, labelNamer otlptranslator.LabelNamer, + inst *observ.Instrumentation, + ctx context.Context, ) { - for _, dp := range gauge.DataPoints { - keys, values, err := getAttrs(dp.Attributes, labelNamer) - if err != nil { - otel.Handle(err) + var err error + var success int64 + if inst != nil { + op := inst.ExportMetrics(ctx, int64(len(gauge.DataPoints))) + defer func() { op.End(success, err) }() + } + + for i, dp := range gauge.DataPoints { + keys, values, e := getAttrs(dp.Attributes, labelNamer) + if e != nil { + reportError(ch, nil, e) + err = errors.Join(err, fmt.Errorf("failed to getAttrs for gauge.DataPoints %d: %w", i, e)) continue } keys = append(keys, kv.keys...) values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) - m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...) - if err != nil { - otel.Handle(err) + m, e := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...) + if e != nil { + reportError(ch, desc, e) + err = errors.Join(err, fmt.Errorf("failed to NewConstMetric for gauge.DataPoints %d: %w", i, e)) continue } ch <- m + + success++ } } @@ -713,3 +803,10 @@ func attributesToLabels(attrs []attribute.KeyValue, labelNamer otlptranslator.La } return labels, nil } + +func reportError(ch chan<- prometheus.Metric, desc *prometheus.Desc, err error) { + if desc == nil { + desc = prometheus.NewInvalidDesc(err) + } + ch <- prometheus.NewInvalidMetric(desc, err) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/counter/counter.go new file mode 100644 index 00000000000..87808d548a7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/counter/counter.go @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/prometheus/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. 
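+//
+// A sketch of the intended pairing with [NextExporterID] in a test:
+//
+//	prev := SetExporterID(0)
+//	defer SetExporterID(prev) // Restore the previous state.
+//
+//	id0 := NextExporterID() // 0
+//	id1 := NextExporterID() // 1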
+func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/gen.go new file mode 100644 index 00000000000..add058a2ec7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/gen.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package internal provides internal functionality for the prometheus +// package. +package internal // import "go.opentelemetry.io/otel/exporters/prometheus/internal" + +//go:generate gotmpl --body=../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/prometheus/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go + +//go:generate gotmpl --body=../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/prometheus\" }" --out=x/x.go +//go:generate gotmpl --body=../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go new file mode 100644 index 00000000000..d8215a6b1a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/observ/instrumentation.go @@ -0,0 +1,249 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation +// for the prometheus exporter. +package observ // import "go.opentelemetry.io/otel/exporters/prometheus/internal/observ" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/prometheus/internal" + "go.opentelemetry.io/otel/exporters/prometheus/internal/x" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ComponentType uniquely identifies the OpenTelemetry Exporter component + // being instrumented. + ComponentType = "go.opentelemetry.io/otel/exporters/prometheus/prometheus.Exporter" + + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/prometheus/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. + Version = internal.Version +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. 
+ return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +func ComponentName(id int64) string { + return fmt.Sprintf("%s/%d", ComponentType, id) +} + +type Instrumentation struct { + inflightMetric metric.Int64UpDownCounter + exportedMetric metric.Int64Counter + operationDuration metric.Float64Histogram + collectionDuration metric.Float64Histogram + + attrs []attribute.KeyValue + setOpt metric.MeasurementOption +} + +func NewInstrumentation(id int64) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{ + attrs: []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeKey.String(ComponentType), + }, + } + + s := attribute.NewSet(i.attrs...) + i.setOpt = metric.WithAttributeSet(s) + + mp := otel.GetMeterProvider() + m := mp.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version), + metric.WithSchemaURL(SchemaURL), + ) + + var err, e error + + inflightMetric, e := otelconv.NewSDKExporterMetricDataPointInflight(m) + if e != nil { + e = fmt.Errorf("failed to create inflight metric: %w", e) + err = errors.Join(err, e) + } + i.inflightMetric = inflightMetric.Inst() + + exportedMetric, e := otelconv.NewSDKExporterMetricDataPointExported(m) + if e != nil { + e = fmt.Errorf("failed to create exported metric: %w", e) + err = errors.Join(err, e) + } + i.exportedMetric = exportedMetric.Inst() + + operationDuration, e := otelconv.NewSDKExporterOperationDuration(m) + if e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + i.operationDuration = operationDuration.Inst() + + collectionDuration, e := otelconv.NewSDKMetricReaderCollectionDuration(m) + if e != nil { + e = fmt.Errorf("failed to create collection duration metric: %w", e) + err = errors.Join(err, e) + } + i.collectionDuration = collectionDuration.Inst() + + return i, err +} + +// RecordOperationDuration starts the timing of an operation. +// +// It returns a [Timer] that tracks the operation duration. The [Timer.Stop] +// method must be called when the operation completes. +func (i *Instrumentation) RecordOperationDuration(ctx context.Context) Timer { + return Timer{ + ctx: ctx, + start: time.Now(), + inst: i, + hist: i.operationDuration, + } +} + +// RecordCollectionDuration starts the timing of a collection operation. +// +// It returns a [Timer] that tracks the collection duration. The [Timer.Stop] +// method must be called when the collection completes. +func (i *Instrumentation) RecordCollectionDuration(ctx context.Context) Timer { + return Timer{ + ctx: ctx, + start: time.Now(), + inst: i, + hist: i.collectionDuration, + } +} + +// Timer tracks the duration of an operation. +type Timer struct { + ctx context.Context + start time.Time + + inst *Instrumentation + hist metric.Float64Histogram +} + +// Stop ends the timing operation and records the elapsed duration. +// +// If err is non-nil, an appropriate error type attribute will be included. 
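+//
+// A sketch of the intended call pattern (mirroring the Collect changes in
+// this diff), assuming a non-nil Instrumentation: defer a closure so that
+// the error assigned later in the function, not the nil value at defer time,
+// is what gets recorded.
+//
+//	func collect(ctx context.Context, inst *Instrumentation) (err error) {
+//		timer := inst.RecordCollectionDuration(ctx)
+//		defer func() { timer.Stop(err) }()
+//		// ... perform the collection, assigning err ...
+//		return err
+//	}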
+func (t Timer) Stop(err error) { + recordOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recordOpt) + *recordOpt = append(*recordOpt, t.inst.setOpt) + + if err != nil { + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, t.inst.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + set := attribute.NewSet(*attrs...) + *recordOpt = append((*recordOpt)[:0], metric.WithAttributeSet(set)) + } + + t.hist.Record(t.ctx, time.Since(t.start).Seconds(), *recordOpt...) +} + +// ExportMetrics starts the observation of a metric export operation. +// +// It returns an [ExportOp] that tracks the export operation. The +// [ExportOp.End] method must be called when the export completes. +func (i *Instrumentation) ExportMetrics(ctx context.Context, n int64) ExportOp { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, i.setOpt) + + i.inflightMetric.Add(ctx, n, *addOpt...) + + return ExportOp{ctx: ctx, nMetrics: n, inst: i} +} + +// ExportOp tracks a metric export operation. +type ExportOp struct { + ctx context.Context + nMetrics int64 + + inst *Instrumentation +} + +// End ends the observation of a metric export operation. +// +// The success parameter is the number of metrics that were successfully +// exported. If a non-nil error is provided, the number of failed metrics will +// be recorded with the error type attribute. +func (e ExportOp) End(success int64, err error) { + addOpt := get[metric.AddOption](addOptPool) + defer put(addOptPool, addOpt) + *addOpt = append(*addOpt, e.inst.setOpt) + + e.inst.inflightMetric.Add(e.ctx, -e.nMetrics, *addOpt...) + e.inst.exportedMetric.Add(e.ctx, success, *addOpt...) + + if err != nil { + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, e.inst.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + set := attribute.NewSet(*attrs...) + + *addOpt = append((*addOpt)[:0], metric.WithAttributeSet(set)) + e.inst.exportedMetric.Add(e.ctx, e.nMetrics-success, *addOpt...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go new file mode 100644 index 00000000000..f551243075e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package internal provides internal utilities for the OpenTelemetry prometheus exporter. +package internal // import "go.opentelemetry.io/otel/exporters/prometheus/internal" + +// Version is the current release version of the OpenTelemetry prometheus +// exporter in use. +const Version = "0.61.0" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/README.md similarity index 64% rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md rename to vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/README.md index feec16fa64b..f8c24eda27a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/README.md @@ -1,25 +1,27 @@ # Experimental Features -The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. 
-These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. +The `prometheus` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `prometheus` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. These features may change in backwards incompatible ways as feedback is applied. See the [Compatibility and Stability](#compatibility-and-stability) section for more information. ## Features -- [Self-Observability](#self-observability) +- [Observability](#observability) -### Self-Observability +### Observability -The SDK provides a self-observability feature that allows you to monitor the SDK itself. +The `prometheus` exporter can be configured to provide observability about itself using OpenTelemetry metrics. -To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. +To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. When enabled, the SDK will create the following metrics using the global `MeterProvider`: -- `otel.sdk.span.live` -- `otel.sdk.span.started` +- `otel.sdk.exporter.metric_data_point.inflight` +- `otel.sdk.exporter.metric_data_point.exported` +- `otel.sdk.metric_reader.collection.duration` +- `otel.sdk.exporter.operation.duration` Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/features.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/features.go new file mode 100644 index 00000000000..fc9aeb08dd8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/features.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x // import "go.opentelemetry.io/otel/exporters/prometheus/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/x.go new file mode 100644 index 00000000000..60d0baa728a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/internal/x/x.go @@ -0,0 +1,58 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/prometheus]. +package x // import "go.opentelemetry.io/otel/exporters/prometheus/internal/x" + +import ( + "os" +) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. 
+type Feature[T any] struct { + keys []string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } + return Feature[T]{ + keys: keys, + parse: parse, + } +} + +// Keys returns the environment variable keys that can be set to enable the +// feature. +func (f Feature[T]) Keys() []string { return f.keys } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } + } + return v, ok +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/gen.go new file mode 100644 index 00000000000..6d14e32da7b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/gen.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package internal provides internal functionality for the stdouttrace +// package. +package internal // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal" + +//go:generate gotmpl --body=../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go + +//go:generate gotmpl --body=../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/stdout/stdouttrace\" }" --out=x/x.go +//go:generate gotmpl --body=../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go new file mode 100644 index 00000000000..051fe6ed621 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ/instrumentation.go @@ -0,0 +1,214 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation +// for the stdout trace exporter. 
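+//
+// When the OTEL_GO_X_OBSERVABILITY feature flag is disabled,
+// NewInstrumentation returns a nil *Instrumentation, so callers guard every
+// use; a sketch of the expected wiring (id comes from the internal counter
+// package):
+//
+//	inst, err := NewInstrumentation(id)
+//	// handle err ...
+//	if inst != nil {
+//		op := inst.ExportSpans(ctx, len(spans))
+//		defer func() { op.End(success, err) }()
+//	}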
+package observ // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ComponentType uniquely identifies the OpenTelemetry Exporter component + // being instrumented. + // + // The STDOUT trace exporter is not a standardized OTel component type, so + // it uses the Go package prefixed type name to ensure uniqueness and + // identity. + ComponentType = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter" + + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL + + // Version is the current version of this instrumentation. + // + // This matches the version of the exporter. + Version = internal.Version +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +func ComponentName(id int64) string { + return fmt.Sprintf("%s/%d", ComponentType, id) +} + +// Instrumentation is experimental instrumentation for the exporter. +type Instrumentation struct { + inflightSpans metric.Int64UpDownCounter + exportedSpans metric.Int64Counter + opDuration metric.Float64Histogram + + attrs []attribute.KeyValue + setOpt metric.MeasurementOption +} + +// NewInstrumentation returns instrumentation for a STDOUT trace exporter with +// the provided ID using the global MeterProvider. +// +// If the experimental observability is disabled, nil is returned. +func NewInstrumentation(id int64) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{ + attrs: []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(id)), + semconv.OTelComponentTypeKey.String(ComponentType), + }, + } + + s := attribute.NewSet(i.attrs...) 
+	i.setOpt = metric.WithAttributeSet(s)
+
+	mp := otel.GetMeterProvider()
+	m := mp.Meter(
+		ScopeName,
+		metric.WithInstrumentationVersion(Version),
+		metric.WithSchemaURL(SchemaURL),
+	)
+
+	var err error
+
+	inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m)
+	if e != nil {
+		e = fmt.Errorf("failed to create span inflight metric: %w", e)
+		err = errors.Join(err, e)
+	}
+	i.inflightSpans = inflightSpans.Inst()
+
+	exportedSpans, e := otelconv.NewSDKExporterSpanExported(m)
+	if e != nil {
+		e = fmt.Errorf("failed to create span exported metric: %w", e)
+		err = errors.Join(err, e)
+	}
+	i.exportedSpans = exportedSpans.Inst()
+
+	opDuration, e := otelconv.NewSDKExporterOperationDuration(m)
+	if e != nil {
+		e = fmt.Errorf("failed to create operation duration metric: %w", e)
+		err = errors.Join(err, e)
+	}
+	i.opDuration = opDuration.Inst()
+
+	return i, err
+}
+
+// ExportSpans instruments the ExportSpans method of the exporter. It returns
+// an [ExportOp] that must have its [ExportOp.End] method called when the
+// operation ends.
+func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp {
+	start := time.Now()
+
+	addOpt := get[metric.AddOption](addOptPool)
+	defer put(addOptPool, addOpt)
+	*addOpt = append(*addOpt, i.setOpt)
+	i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...)
+
+	return ExportOp{
+		ctx:    ctx,
+		start:  start,
+		nSpans: int64(nSpans),
+		inst:   i,
+	}
+}
+
+// ExportOp is an in-progress ExportSpans operation.
+type ExportOp struct {
+	ctx    context.Context
+	start  time.Time
+	nSpans int64
+	inst   *Instrumentation
+}
+
+// End ends the ExportSpans operation, recording its success and duration.
+//
+// The success parameter indicates how many spans were successfully exported.
+// The err parameter indicates whether the operation failed. If err is not nil,
+// the number of failed spans (nSpans - success) is also recorded.
+func (e ExportOp) End(success int64, err error) {
+	addOpt := get[metric.AddOption](addOptPool)
+	defer put(addOptPool, addOpt)
+	*addOpt = append(*addOpt, e.inst.setOpt)
+
+	e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...)
+
+	// Record the success and duration of the operation.
+	//
+	// Do not exclude 0 values, as they are valid and indicate no spans
+	// were exported, which is meaningful for certain aggregations.
+	e.inst.exportedSpans.Add(e.ctx, success, *addOpt...)
+
+	mOpt := e.inst.setOpt
+	if err != nil {
+		attrs := get[attribute.KeyValue](measureAttrsPool)
+		defer put(measureAttrsPool, attrs)
+		*attrs = append(*attrs, e.inst.attrs...)
+		*attrs = append(*attrs, semconv.ErrorType(err))
+
+		// Use WithAttributeSet instead of WithAttributes to avoid an
+		// inefficient copy of attrs.
+		set := attribute.NewSet(*attrs...)
+		mOpt = metric.WithAttributeSet(set)
+
+		// Reset addOpt with the new attribute set.
+		*addOpt = append((*addOpt)[:0], mOpt)
+
+		e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...)
+	}
+
+	recordOpt := get[metric.RecordOption](recordOptPool)
+	defer put(recordOptPool, recordOpt)
+	*recordOpt = append(*recordOpt, mOpt)
+	e.inst.opDuration.Record(e.ctx, time.Since(e.start).Seconds(), *recordOpt...)
+} diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go new file mode 100644 index 00000000000..3046f6d3802 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/version.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal" + +// Version is the current release version of the OpenTelemetry stdouttrace +// exporter in use. +const Version = "1.39.0" diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md index 6b7d1aec876..c41ea34bfdd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/README.md @@ -8,13 +8,13 @@ See the [Compatibility and Stability](#compatibility-and-stability) section for ## Features -- [Self-Observability](#self-observability) +- [Observability](#observability) -### Self-Observability +### Observability -The `stdouttrace` exporter provides a self-observability feature that allows you to monitor the SDK itself. +The `stdouttrace` exporter can be configured to provide observability about itself using OpenTelemetry metrics. -To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. +To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`. When enabled, the SDK will create the following metrics using the global `MeterProvider`: diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/features.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/features.go new file mode 100644 index 00000000000..5c638d3b0cf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/features.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/stdout/stdouttrace]. +package x // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + +import "strings" + +// Observability is an experimental feature flag that determines if exporter +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go index 55bb98a9658..614242892e7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x/x.go @@ -1,3 +1,6 @@ +// Code generated by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -6,40 +9,30 @@ package x // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/inter import ( "os" - "strings" ) -// SelfObservability is an experimental feature flag that determines if SDK -// self-observability metrics are enabled. -// -// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -49,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go index d61324d2ee9..822ed5d7ee9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go @@ -11,23 +11,12 @@ import ( "sync" "time" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ" "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" ) -// otelComponentType is a name identifying the type of the OpenTelemetry -// component. It is not a standardized OTel component type, so it uses the -// Go package prefixed type name to ensure uniqueness and identity. 
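The regenerated x.go above changes Feature to resolve from an ordered list of environment keys, keeping the old OTEL_GO_X_SELF_OBSERVABILITY name as a fallback and treating an empty value as unset. Below is a minimal standalone sketch of that lookup precedence with illustrative names, not the vendored implementation; note that a non-empty earlier key short-circuits the search even when its value does not parse as "true".

package main

import (
	"fmt"
	"os"
	"strings"
)

// feature mirrors the shape of the generated Feature[T]: an ordered
// list of OTEL_GO_X_* keys where the first non-empty value wins.
type feature struct{ keys []string }

func newFeature(suffixes ...string) feature {
	const root = "OTEL_GO_X_"
	keys := make([]string, 0, len(suffixes))
	for _, s := range suffixes {
		keys = append(keys, root+s)
	}
	return feature{keys: keys}
}

// enabled checks keys in declaration order; an empty value is treated
// the same as unset, per the OTel environment variable specification.
func (f feature) enabled() bool {
	for _, key := range f.keys {
		if v := os.Getenv(key); v != "" {
			return strings.EqualFold(v, "true")
		}
	}
	return false
}

func main() {
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "True")
	obs := newFeature("OBSERVABILITY", "SELF_OBSERVABILITY")
	fmt.Println(obs.enabled()) // true: the legacy key still opts in
}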
-const otelComponentType = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter" - var zeroTime time.Time var _ trace.SpanExporter = &Exporter{} @@ -46,39 +35,8 @@ func New(options ...Option) (*Exporter, error) { timestamps: cfg.Timestamps, } - if !x.SelfObservability.Enabled() { - return exporter, nil - } - - exporter.selfObservabilityEnabled = true - exporter.selfObservabilityAttrs = []attribute.KeyValue{ - semconv.OTelComponentName(fmt.Sprintf("%s/%d", otelComponentType, counter.NextExporterID())), - semconv.OTelComponentTypeKey.String(otelComponentType), - } - s := attribute.NewSet(exporter.selfObservabilityAttrs...) - exporter.selfObservabilitySetOpt = metric.WithAttributeSet(s) - - mp := otel.GetMeterProvider() - m := mp.Meter( - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - var err, e error - if exporter.spanInflightMetric, e = otelconv.NewSDKExporterSpanInflight(m); e != nil { - e = fmt.Errorf("failed to create span inflight metric: %w", e) - err = errors.Join(err, e) - } - if exporter.spanExportedMetric, e = otelconv.NewSDKExporterSpanExported(m); e != nil { - e = fmt.Errorf("failed to create span exported metric: %w", e) - err = errors.Join(err, e) - } - if exporter.operationDurationMetric, e = otelconv.NewSDKExporterOperationDuration(m); e != nil { - e = fmt.Errorf("failed to create operation duration metric: %w", e) - err = errors.Join(err, e) - } - + var err error + exporter.inst, err = observ.NewInstrumentation(counter.NextExporterID()) return exporter, err } @@ -91,107 +49,15 @@ type Exporter struct { stoppedMu sync.RWMutex stopped bool - selfObservabilityEnabled bool - selfObservabilityAttrs []attribute.KeyValue // selfObservability common attributes - selfObservabilitySetOpt metric.MeasurementOption - spanInflightMetric otelconv.SDKExporterSpanInflight - spanExportedMetric otelconv.SDKExporterSpanExported - operationDurationMetric otelconv.SDKExporterOperationDuration + inst *observ.Instrumentation } -var ( - measureAttrsPool = sync.Pool{ - New: func() any { - // "component.name" + "component.type" + "error.type" - const n = 1 + 1 + 1 - s := make([]attribute.KeyValue, 0, n) - // Return a pointer to a slice instead of a slice itself - // to avoid allocations on every call. - return &s - }, - } - - addOptPool = &sync.Pool{ - New: func() any { - const n = 1 // WithAttributeSet - o := make([]metric.AddOption, 0, n) - return &o - }, - } - - recordOptPool = &sync.Pool{ - New: func() any { - const n = 1 // WithAttributeSet - o := make([]metric.RecordOption, 0, n) - return &o - }, - } -) - // ExportSpans writes spans in json format to stdout. func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) (err error) { var success int64 - if e.selfObservabilityEnabled { - count := int64(len(spans)) - - addOpt := addOptPool.Get().(*[]metric.AddOption) - defer func() { - *addOpt = (*addOpt)[:0] - addOptPool.Put(addOpt) - }() - - *addOpt = append(*addOpt, e.selfObservabilitySetOpt) - - e.spanInflightMetric.Inst().Add(ctx, count, *addOpt...) - defer func(starting time.Time) { - e.spanInflightMetric.Inst().Add(ctx, -count, *addOpt...) - - // Record the success and duration of the operation. - // - // Do not exclude 0 values, as they are valid and indicate no spans - // were exported which is meaningful for certain aggregations. - e.spanExportedMetric.Inst().Add(ctx, success, *addOpt...) 
- - mOpt := e.selfObservabilitySetOpt - if err != nil { - // additional attributes for self-observability, - // only spanExportedMetric and operationDurationMetric are supported. - attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) - defer func() { - *attrs = (*attrs)[:0] // reset the slice for reuse - measureAttrsPool.Put(attrs) - }() - *attrs = append(*attrs, e.selfObservabilityAttrs...) - *attrs = append(*attrs, semconv.ErrorType(err)) - - // Do not inefficiently make a copy of attrs by using - // WithAttributes instead of WithAttributeSet. - set := attribute.NewSet(*attrs...) - mOpt = metric.WithAttributeSet(set) - - // Reset addOpt with new attribute set. - *addOpt = append((*addOpt)[:0], mOpt) - - e.spanExportedMetric.Inst().Add( - ctx, - count-success, - *addOpt..., - ) - } - - recordOpt := recordOptPool.Get().(*[]metric.RecordOption) - defer func() { - *recordOpt = (*recordOpt)[:0] - recordOptPool.Put(recordOpt) - }() - - *recordOpt = append(*recordOpt, mOpt) - e.operationDurationMetric.Inst().Record( - ctx, - time.Since(starting).Seconds(), - *recordOpt..., - ) - }(time.Now()) + if e.inst != nil { + op := e.inst.ExportSpans(ctx, len(spans)) + defer func() { op.End(success, err) }() } if err := ctx.Err(); err != nil { diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index adb37b5b0e7..6db969f73c7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -105,7 +105,7 @@ type delegatedInstrument interface { setDelegate(metric.Meter) } -// instID are the identifying properties of a instrument. +// instID are the identifying properties of an instrument. type instID struct { // name is the name of the stream. name string diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index 1e6473b32f3..527d9aec86b 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -11,7 +11,7 @@ import ( // Meter returns a Meter from the global MeterProvider. The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. -// If the name is empty, then a implementation defined default name will be +// If the name is empty, then an implementation defined default name will be // used instead. // // If this is called before a global MeterProvider is registered the returned diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index d9e3b13e4d1..e42dd6e70ab 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -3,7 +3,11 @@ package metric // import "go.opentelemetry.io/otel/metric" -import "go.opentelemetry.io/otel/attribute" +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) // MeterConfig contains options for Meters. type MeterConfig struct { @@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// WithInstrumentationAttributes adds the instrumentation attributes. +// +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. 
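A short sketch of the merge semantics documented in this config.go hunk, using the public metric API. The expected output assumes, per the new doc comment, that attributes merge in option order and the last value for a duplicate key wins.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	set := attribute.NewSet(attribute.String("tier", "gold"))
	cfg := metric.NewMeterConfig(
		metric.WithInstrumentationAttributes(
			attribute.String("region", "us-east"),
			attribute.String("tier", "free"),
		),
		// Passed later, so its "tier" value overrides the one above.
		metric.WithInstrumentationAttributeSet(set),
	)
	enc := attribute.DefaultEncoder()
	fmt.Println(cfg.InstrumentationAttributes().Encoded(enc))
	// region=us-east,tier=gold
}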
// -// The passed attributes will be de-duplicated. +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { + if set.Len() == 0 { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + return config + }) + } + return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6692d2665d2..271ab71f1ae 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { } // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go new file mode 100644 index 00000000000..bfeb73e811b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import "strings" + +// Resource is an experimental feature flag that defines if resource detectors +// should include experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature( + []string{"RESOURCE"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) + +// Observability is an experimental feature flag that determines if SDK +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this).
+var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 1be472e917a..13347e56052 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -1,48 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x contains support for OTel SDK experimental features. -// -// This package should only be used for features defined in the specification. -// It should not be used for experiments or new project ideas. +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. package x // import "go.opentelemetry.io/otel/sdk/internal/x" import ( "os" - "strings" ) -// Resource is an experimental feature flag that defines if resource detectors -// should be included experimental semantic conventions. -// -// To enable this feature set the OTEL_GO_X_RESOURCE environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.EqualFold(v, "true") { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -52,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } // Enabled reports whether the feature is enabled. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 0f3b9d623f7..dd75eefac1f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -63,6 +63,22 @@ // issues for databases that cannot handle high cardinality. // - A too low of a limit causes loss of attribute detail as more data falls into overflow. 
// +// # Ordering and Collection Guarantees +// +// For performance reasons, the SDK does not guarantee that the order in which +// synchronous measurements are made to the SDK is reflected in the collected +// metric data. This means that even when a single goroutine makes sequential +// synchronous measurements, it is possible for a later measurement to be +// included in the collected metric data when an earlier measurement is not. +// This applies to measurements made to different instruments, or to different +// attribute sets on the same instrument. Sequential measurements made to the +// same instrument and with the same attributes are guaranteed to preserve +// ordering with respect to collection. +// +// Additionally, the SDK does not guarantee that exemplars are always included +// in the same batch of metric data as the measurement they are associated +// with. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go index 08e8f68fe73..453278a0c38 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -7,9 +7,11 @@ import ( "context" "math" "math/rand/v2" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. @@ -34,7 +36,9 @@ var _ Reservoir = &FixedSizeReservoir{} // If there are more than k, the Reservoir will then randomly sample all // additional measurement with a decreasing probability. type FixedSizeReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // count is the number of measurement seen. count int64 @@ -123,12 +127,14 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a // https://github.com/MrAlias/reservoir-sampling for a performance // comparison of reservoir sampling algorithms. - if int(r.count) < cap(r.store) { - r.store[r.count] = newMeasurement(ctx, t, n, a) + r.mu.Lock() + defer r.mu.Unlock() + if int(r.count) < cap(r.measurements) { + r.store(int(r.count), newMeasurement(ctx, t, n, a)) } else if r.count == r.next { // Overwrite a random existing measurement with the one offered. - idx := int(rand.Int64N(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) + idx := int(rand.Int64N(int64(cap(r.measurements)))) + r.store(idx, newMeasurement(ctx, t, n, a)) r.advance() } r.count++ @@ -139,7 +145,7 @@ func (r *FixedSizeReservoir) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. - r.next = int64(cap(r.store)) + r.next = int64(cap(r.measurements)) // Initial random number in the series used to generate r.next. // @@ -150,7 +156,7 @@ func (r *FixedSizeReservoir) reset() { // This maps the uniform random number in (0,1) to a geometric distribution // over the same interval. The mean of the distribution is inversely // proportional to the storage capacity. - r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) r.advance() } @@ -170,7 +176,7 @@ func (r *FixedSizeReservoir) advance() { // therefore the next r.w will be based on the same distribution (i.e. 
// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by // computing the next random number `u` and take r.w as `w * u^(1/k)`. - r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) // Use the new random number in the series to calculate the count of the // next measurement that will be stored. // @@ -188,6 +194,8 @@ func (r *FixedSizeReservoir) advance() { // // The Reservoir state is preserved after this call. func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() r.storage.Collect(dest) // Call reset here even though it will reset r.count and restart the random // number series. This will persist any old exemplars as long as no new diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go index decab613e77..60c871a4432 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -7,9 +7,11 @@ import ( "context" "slices" "sort" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // HistogramReservoirProvider is a provider of [HistogramReservoir]. @@ -39,7 +41,9 @@ var _ Reservoir = &HistogramReservoir{} // falls within a histogram bucket. The histogram bucket upper-boundaries are // define by bounds. type HistogramReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // bounds are bucket bounds in ascending order. bounds []float64 @@ -57,14 +61,29 @@ type HistogramReservoir struct { // parameters are the value and dropped (filtered) attributes of the // measurement respectively. func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 + var n float64 switch v.Type() { case Int64ValueType: - x = float64(v.Int64()) + n = float64(v.Int64()) case Float64ValueType: - x = v.Float64() + n = v.Float64() default: panic("unknown value type") } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) + + idx := sort.SearchFloat64s(r.bounds, n) + m := newMeasurement(ctx, t, v, a) + + r.mu.Lock() + defer r.mu.Unlock() + r.store(idx, m) +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *HistogramReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() + r.storage.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go index 0e2e26dfb18..16b61c07dec 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go @@ -13,24 +13,28 @@ import ( // storage is an exemplar storage for [Reservoir] implementations. type storage struct { - // store are the measurements sampled. + // measurements are the measurements sampled. // // This does not use []metricdata.Exemplar because it potentially would // require an allocation for trace and span IDs in the hot path of Offer. 
- store []measurement + measurements []measurement } func newStorage(n int) *storage { - return &storage{store: make([]measurement, n)} + return &storage{measurements: make([]measurement, n)} +} + +func (r *storage) store(idx int, m measurement) { + r.measurements[idx] = m } // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. func (r *storage) Collect(dest *[]Exemplar) { - *dest = reset(*dest, len(r.store), len(r.store)) + *dest = reset(*dest, len(r.measurements), len(r.measurements)) var n int - for _, m := range r.store { + for _, m := range r.measurements { if !m.valid { continue } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go index 25ea6244e57..e0558cb634d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -23,8 +23,9 @@ const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogr var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} func (i InstrumentKind) String() string { - if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_InstrumentKind_index)-1 { return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] + return _InstrumentKind_name[_InstrumentKind_index[idx]:_InstrumentKind_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index 0321da68150..2b60410801b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -110,12 +110,13 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati // Sum returns a sum aggregate function input and output. func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { - s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: - return b.filter(s.measure), s.delta + s := newDeltaSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect default: - return b.filter(s.measure), s.cumulative + s := newCumulativeSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go new file mode 100644 index 00000000000..0fa6d3c6fa8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "math" + "runtime" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" +) + +// atomicCounter is an efficient way of adding to a number which is either an +// int64 or float64. It is designed to be efficient when adding whole +// numbers, regardless of whether N is an int64 or float64. 
+// +// Inspired by the Prometheus counter implementation: +// https://github.com/prometheus/client_golang/blob/14ccb93091c00f86b85af7753100aa372d63602b/prometheus/counter.go#L108 +type atomicCounter[N int64 | float64] struct { + // nFloatBits contains only the non-integer portion of the counter. + nFloatBits atomic.Uint64 + // nInt contains only the integer portion of the counter. + nInt atomic.Int64 +} + +// load returns the current value. The caller must ensure all calls to add have +// returned prior to calling load. +func (n *atomicCounter[N]) load() N { + fval := math.Float64frombits(n.nFloatBits.Load()) + ival := n.nInt.Load() + return N(fval + float64(ival)) +} + +func (n *atomicCounter[N]) add(value N) { + ival := int64(value) + // This case is where the value is an int, or if it is a whole-numbered float. + if float64(ival) == float64(value) { + n.nInt.Add(ival) + return + } + + // Value must be a float below. + for { + oldBits := n.nFloatBits.Load() + newBits := math.Float64bits(math.Float64frombits(oldBits) + float64(value)) + if n.nFloatBits.CompareAndSwap(oldBits, newBits) { + return + } + } +} + +// hotColdWaitGroup is a synchronization primitive which enables lockless +// writes for concurrent writers and enables a reader to acquire exclusive +// access to a snapshot of state including only completed operations. +// Conceptually, it can be thought of as a "hot" wait group, +// and a "cold" wait group, with the ability for the reader to atomically swap +// the hot and cold wait groups, and wait for the now-cold wait group to +// complete. +// +// Inspired by the prometheus/client_golang histogram implementation: +// https://github.com/prometheus/client_golang/blob/a974e0d45e0aa54c65492559114894314d8a2447/prometheus/histogram.go#L725 +// +// Usage: +// +// var hcwg hotColdWaitGroup +// var data [2]any +// +// func write() { +// hotIdx := hcwg.start() +// defer hcwg.done(hotIdx) +// // modify data without locking +// data[hotIdx].update() +// } +// +// func read() { +// coldIdx := hcwg.swapHotAndWait() +// // read data now that all writes to the cold data have completed. +// data[coldIdx].read() +// } +type hotColdWaitGroup struct { + // startedCountAndHotIdx contains a 63-bit counter in the lower bits, + // and a 1 bit hot index to denote which of the two data points new + // measurements should write to. These are contained together so that read() + // can atomically swap the hot bit, reset the started writes to zero, and + // read the number of writes that were started prior to the hot bit being + // swapped. + startedCountAndHotIdx atomic.Uint64 + // endedCounts is the number of writes that have completed to each + // dataPoint. + endedCounts [2]atomic.Uint64 +} + +// start returns the hot index that the writer should write to. The returned +// hot index is 0 or 1. The caller must call done(hot index) after it finishes +// its operation. start() is safe to call concurrently with other methods. +func (l *hotColdWaitGroup) start() uint64 { + // We increment l.startedCountAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to return the currently-hot index. + return l.startedCountAndHotIdx.Add(1) >> 63 +} + +// done signals to the reader that an operation has fully completed. +// done is safe to call concurrently.
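The atomicCounter above routes whole-numbered additions to a plain atomic integer so the common case avoids the compare-and-swap retry loop that fractional values need. A self-contained sketch of the same idea with a concrete float64 type (illustrative, not the vendored generic code):

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

type counter struct {
	floatBits atomic.Uint64 // fractional contributions, as float64 bits
	whole     atomic.Int64  // whole-numbered contributions
}

func (c *counter) add(v float64) {
	if i := int64(v); float64(i) == v {
		c.whole.Add(i) // fast path: no CAS retry loop needed
		return
	}
	for {
		old := c.floatBits.Load()
		next := math.Float64bits(math.Float64frombits(old) + v)
		if c.floatBits.CompareAndSwap(old, next) {
			return
		}
	}
}

// load sums the two halves; as in the vendored code, callers must ensure
// all adds have returned before loading.
func (c *counter) load() float64 {
	return math.Float64frombits(c.floatBits.Load()) + float64(c.whole.Load())
}

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.add(1); c.add(0.25) }()
	}
	wg.Wait()
	fmt.Println(c.load()) // 125
}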
+func (l *hotColdWaitGroup) done(hotIdx uint64) { + l.endedCounts[hotIdx].Add(1) +} + +// swapHotAndWait swaps the hot bit, waits for all start() calls to be done(), +// and then returns the now-cold index for the reader to read from. The +// returned index is 0 or 1. swapHotAndWait must not be called concurrently. +func (l *hotColdWaitGroup) swapHotAndWait() uint64 { + n := l.startedCountAndHotIdx.Load() + coldIdx := (^n) >> 63 + // Swap the hot and cold index while resetting the started measurements + // count to zero. + n = l.startedCountAndHotIdx.Swap((coldIdx << 63)) + hotIdx := n >> 63 + startedCount := n & ((1 << 63) - 1) + // Wait for all measurements to the previously-hot map to finish. + for startedCount != l.endedCounts[hotIdx].Load() { + runtime.Gosched() // Let measurements complete. + } + // reset the number of ended operations + l.endedCounts[hotIdx].Store(0) + return hotIdx +} + +// limitedSyncMap is a sync.Map which enforces the aggregation limit on +// attribute sets and provides a Len() function. +type limitedSyncMap struct { + sync.Map + aggLimit int + len int + lenMux sync.Mutex +} + +func (m *limitedSyncMap) LoadOrStoreAttr(fltrAttr attribute.Set, newValue func(attribute.Set) any) any { + actual, loaded := m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + // If the overflow set exists, assume we have already overflowed and don't + // bother with the slow path below. + actual, loaded = m.Load(overflowSet.Equivalent()) + if loaded { + return actual + } + // Slow path: add a new attribute set. + m.lenMux.Lock() + defer m.lenMux.Unlock() + + // re-fetch now that we hold the lock to ensure we don't use the overflow + // set unless we are sure the attribute set isn't being written + // concurrently. + actual, loaded = m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + + if m.aggLimit > 0 && m.len >= m.aggLimit-1 { + fltrAttr = overflowSet + } + actual, loaded = m.LoadOrStore(fltrAttr.Equivalent(), newValue(fltrAttr)) + if !loaded { + m.len++ + } + return actual +} + +func (m *limitedSyncMap) Clear() { + m.lenMux.Lock() + defer m.lenMux.Unlock() + m.len = 0 + m.Map.Clear() +} + +func (m *limitedSyncMap) Len() int { + m.lenMux.Lock() + defer m.lenMux.Unlock() + return m.len +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index 857eddf305f..5b3a19c067d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -301,7 +301,7 @@ func newExponentialHistogram[N int64 | float64]( maxScale: maxScale, newRes: r, - limit: newLimiter[*expoHistogramDataPoint[N]](limit), + limit: newLimiter[expoHistogramDataPoint[N]](limit), values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), start: now(), @@ -317,7 +317,7 @@ type expoHistogram[N int64 | float64] struct { maxScale int32 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*expoHistogramDataPoint[N]] + limit limiter[expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -338,13 +338,18 @@ func (e *expoHistogram[N]) measure( e.valuesMu.Lock() defer e.valuesMu.Unlock() - attr := e.limit.Attributes(fltrAttr, e.values) - v, ok := e.values[attr.Equivalent()] + v, ok := e.values[fltrAttr.Equivalent()] if !ok { - v = newExpoHistogramDataPoint[N](attr, 
e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes(attr) - - e.values[attr.Equivalent()] = v + fltrAttr = e.limit.Attributes(fltrAttr, e.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. + v, ok = e.values[fltrAttr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](fltrAttr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes(fltrAttr) + + e.values[fltrAttr.Equivalent()] = v + } } v.record(value) v.res.Offer(ctx, value, droppedAttr) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go index d4c41642d79..e4f9409bc80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -5,10 +5,12 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" + "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter. @@ -29,6 +31,11 @@ type FilteredExemplarReservoir[N int64 | float64] interface { type filteredExemplarReservoir[N int64 | float64] struct { filter exemplar.Filter reservoir exemplar.Reservoir + // The exemplar.Reservoir is not required to be concurrent safe, but + // implementations can indicate that they are concurrent-safe by embedding + // reservoir.ConcurrentSafe in order to improve performance. + reservoirMux sync.Mutex + concurrentSafe bool } // NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values @@ -37,17 +44,30 @@ func NewFilteredExemplarReservoir[N int64 | float64]( f exemplar.Filter, r exemplar.Reservoir, ) FilteredExemplarReservoir[N] { + _, concurrentSafe := r.(reservoir.ConcurrentSafe) return &filteredExemplarReservoir[N]{ - filter: f, - reservoir: r, + filter: f, + reservoir: r, + concurrentSafe: concurrentSafe, } } func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { if f.filter(ctx) { // only record the current time if we are sampling this measurement. 
- f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr) + ts := time.Now() + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Offer(ctx, ts, exemplar.NewValue(val), attr) } } -func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) } +func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Collect(dest) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 736287e736f..a094519cf6d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -31,9 +31,12 @@ func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { func (b *buckets[N]) sum(value N) { b.total += value } -func (b *buckets[N]) bin(idx int, value N) { +func (b *buckets[N]) bin(idx int) { b.counts[idx]++ b.count++ +} + +func (b *buckets[N]) minMax(value N) { if value < b.min { b.min = value } else if value > b.max { @@ -44,18 +47,19 @@ func (b *buckets[N]) bin(idx int, value N) { // histValues summarizes a set of measurements as an histValues with // explicitly defined buckets. type histValues[N int64 | float64] struct { - noSum bool - bounds []float64 + noMinMax bool + noSum bool + bounds []float64 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*buckets[N]] + limit limiter[buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } func newHistValues[N int64 | float64]( bounds []float64, - noSum bool, + noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N], ) *histValues[N] { @@ -66,11 +70,12 @@ func newHistValues[N int64 | float64]( b := slices.Clone(bounds) slices.Sort(b) return &histValues[N]{ - noSum: noSum, - bounds: b, - newRes: r, - limit: newLimiter[*buckets[N]](limit), - values: make(map[attribute.Distinct]*buckets[N]), + noMinMax: noMinMax, + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), } } @@ -92,24 +97,32 @@ func (s *histValues[N]) measure( s.valuesMu.Lock() defer s.valuesMu.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - b, ok := s.values[attr.Equivalent()] + b, ok := s.values[fltrAttr.Equivalent()] if !ok { - // N+1 buckets. For example: - // - // bounds = [0, 5, 10] - // - // Then, - // - // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) - b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes(attr) - - // Ensure min and max are recorded values (not zero), for new buckets. - b.min, b.max = value, value - s.values[attr.Equivalent()] = b + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. + b, ok = s.values[fltrAttr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](fltrAttr, len(s.bounds)+1) + b.res = s.newRes(fltrAttr) + + // Ensure min and max are recorded values (not zero), for new buckets. 
+ b.min, b.max = value, value + s.values[fltrAttr.Equivalent()] = b + } + } + b.bin(idx) + if !s.noMinMax { + b.minMax(value) } - b.bin(idx, value) if !s.noSum { b.sum(value) } @@ -125,8 +138,7 @@ func newHistogram[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *histogram[N] { return &histogram[N]{ - histValues: newHistValues[N](boundaries, noSum, limit, r), - noMinMax: noMinMax, + histValues: newHistValues[N](boundaries, noMinMax, noSum, limit, r), start: now(), } } @@ -136,8 +148,7 @@ func newHistogram[N int64 | float64]( type histogram[N int64 | float64] struct { *histValues[N] - noMinMax bool - start time.Time + start time.Time } func (s *histogram[N]) delta( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 4bbe624c77c..3e2ed741505 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -23,7 +23,7 @@ func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredEx return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), - values: make(map[attribute.Distinct]datapoint[N]), + values: make(map[attribute.Distinct]*datapoint[N]), start: now(), } } @@ -34,7 +34,7 @@ type lastValue[N int64 | float64] struct { newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] - values map[attribute.Distinct]datapoint[N] + values map[attribute.Distinct]*datapoint[N] start time.Time } @@ -42,17 +42,19 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.Lock() defer s.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - d, ok := s.values[attr.Equivalent()] + d, ok := s.values[fltrAttr.Equivalent()] if !ok { - d.res = s.newRes(attr) + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + d = &datapoint[N]{ + res: s.newRes(fltrAttr), + attrs: fltrAttr, + } } - d.attrs = attr d.value = value d.res.Offer(ctx, value, droppedAttr) - s.values[attr.Equivalent()] = d + s.values[fltrAttr.Equivalent()] = d } func (s *lastValue[N]) delta( diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go index 9ea0251edd7..c19a1aff68f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -30,7 +30,7 @@ func newLimiter[V any](aggregation int) limiter[V] { // aggregation cardinality limit for the existing measurements. If it will, // overflowSet is returned. Otherwise, if it will not exceed the limit, or the // limit is not set (limit <= 0), attr is returned. 
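The filtered reservoir above decides once, at construction, whether the wrapped reservoir opted in to concurrency through a marker interface, and skips its mutex when it did. A generic sketch of that optional-interface pattern with illustrative types, not the vendored ones:

package main

import (
	"fmt"
	"sync"
)

// concurrentSafe is a marker interface a wrapped value may implement to
// declare that its methods are safe to call concurrently.
type concurrentSafe interface{ concurrentSafe() }

type store interface{ Put(int) }

type lockedStore struct {
	mu     sync.Mutex
	inner  store
	noLock bool
}

// newLockedStore checks the marker once up front, so the hot path only
// pays for locking when the inner store did not opt in.
func newLockedStore(s store) *lockedStore {
	_, safe := s.(concurrentSafe)
	return &lockedStore{inner: s, noLock: safe}
}

func (l *lockedStore) Put(v int) {
	if !l.noLock {
		l.mu.Lock()
		defer l.mu.Unlock()
	}
	l.inner.Put(v)
}

type safeStore struct{ vals []int } // pretend it is internally synchronized

func (safeStore) concurrentSafe() {}
func (s *safeStore) Put(v int)    { s.vals = append(s.vals, v) }

func main() {
	ls := newLockedStore(&safeStore{})
	ls.Put(1)
	fmt.Println("lock skipped:", ls.noLock) // lock skipped: true
}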
-func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set { +func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]*V) attribute.Set { if l.aggLimit > 0 { _, exists := measurements[attrs.Equivalent()] if !exists && len(measurements) >= l.aggLimit-1 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 1b4b2304c0b..81690855114 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -5,7 +5,6 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" - "sync" "time" "go.opentelemetry.io/otel/attribute" @@ -13,64 +12,75 @@ import ( ) type sumValue[N int64 | float64] struct { - n N + n atomicCounter[N] res FilteredExemplarReservoir[N] attrs attribute.Set } -// valueMap is the storage for sums. type valueMap[N int64 | float64] struct { - sync.Mutex + values limitedSyncMap newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[sumValue[N]] - values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { - return &valueMap[N]{ - newRes: r, - limit: newLimiter[sumValue[N]](limit), - values: make(map[attribute.Distinct]sumValue[N]), - } -} - -func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - s.Lock() - defer s.Unlock() - - attr := s.limit.Attributes(fltrAttr, s.values) - v, ok := s.values[attr.Equivalent()] - if !ok { - v.res = s.newRes(attr) - } - - v.attrs = attr - v.n += value - v.res.Offer(ctx, value, droppedAttr) - - s.values[attr.Equivalent()] = v +func (s *valueMap[N]) measure( + ctx context.Context, + value N, + fltrAttr attribute.Set, + droppedAttr []attribute.KeyValue, +) { + sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { + return &sumValue[N]{ + res: s.newRes(attr), + attrs: attr, + } + }).(*sumValue[N]) + sv.n.add(value) + // It is possible for collection to race with measurement and observe the + // exemplar in the batch of metrics after the add() for cumulative sums. + // This is an accepted tradeoff to avoid locking during measurement. + sv.res.Offer(ctx, value, droppedAttr) } -// newSum returns an aggregator that summarizes a set of measurements as their -// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle -// the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { - return &sum[N]{ - valueMap: newValueMap[N](limit, r), +// newDeltaSum returns an aggregator that summarizes a set of measurements as +// their arithmetic sum. Each sum is scoped by attributes and the aggregation +// cycle the measurements were made in. +func newDeltaSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *deltaSum[N] { + return &deltaSum[N]{ monotonic: monotonic, start: now(), + hotColdValMap: [2]valueMap[N]{ + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + }, } } -// sum summarizes a set of measurements made as their arithmetic sum. 
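The limiter above folds any new attribute set into a single overflow series once the configured limit would be exceeded. A hedged end-to-end sketch of that behavior through the public API, assuming the SDK's experimental OTEL_GO_X_CARDINALITY_LIMIT variable is the knob that feeds this limit and that the overflow series counts toward it:

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	// Experimental knob; read by the SDK when aggregation state is built,
	// so it is set before the provider is constructed.
	os.Setenv("OTEL_GO_X_CARDINALITY_LIMIT", "3")

	ctx := context.Background()
	reader := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = mp.Shutdown(ctx) }()

	counter, _ := mp.Meter("example").Int64Counter("hits")
	for i := 0; i < 5; i++ {
		counter.Add(ctx, 1, metric.WithAttributes(attribute.Int("shard", i)))
	}

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm)
	sum := rm.ScopeMetrics[0].Metrics[0].Data.(metricdata.Sum[int64])
	fmt.Println(len(sum.DataPoints)) // 3: two real series plus one overflow
}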
-type sum[N int64 | float64] struct { - *valueMap[N] - +// deltaSum is the storage for sums which resets every collection interval. +type deltaSum[N int64 | float64] struct { monotonic bool start time.Time + + hcwg hotColdWaitGroup + hotColdValMap [2]valueMap[N] +} + +func (s *deltaSum[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + hotIdx := s.hcwg.start() + defer s.hcwg.done(hotIdx) + s.hotColdValMap[hotIdx].measure(ctx, value, fltrAttr, droppedAttr) } -func (s *sum[N]) delta( +func (s *deltaSum[N]) collect( dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface ) int { t := now() @@ -81,33 +91,61 @@ func (s *sum[N]) delta( sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // delta always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for _, val := range s.values { + s.hotColdValMap[readIdx].values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = val.n - collectExemplars(&dPts[i].Exemplars, val.res.Collect) + dPts[i].Value = val.n.load() i++ - } - // Do not report stale values. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() // The delta collection cycle resets. s.start = t sData.DataPoints = dPts *dest = sData - return n + return i +} + +// newCumulativeSum returns an aggregator that summarizes a set of measurements +// as their arithmetic sum. Each sum is scoped by attributes and the +// aggregation cycle the measurements were made in. +func newCumulativeSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *cumulativeSum[N] { + return &cumulativeSum[N]{ + monotonic: monotonic, + start: now(), + valueMap: valueMap[N]{ + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + } } -func (s *sum[N]) cumulative( +// cumulativeSum is the storage for sums which never reset. +type cumulativeSum[N int64 | float64] struct { + monotonic bool + start time.Time + + valueMap[N] +} + +func (s *cumulativeSum[N]) collect( dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface ) int { t := now() @@ -118,30 +156,33 @@ func (s *sum[N]) cumulative( sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) - dPts := reset(sData.DataPoints, n, n) + // Values are being concurrently written while we iterate, so only use the + // current length for capacity.
+ dPts := reset(sData.DataPoints, 0, s.values.Len()) var i int - for _, value := range s.values { - dPts[i].Attributes = value.attrs - dPts[i].StartTime = s.start - dPts[i].Time = t - dPts[i].Value = value.n - collectExemplars(&dPts[i].Exemplars, value.res.Collect) + s.values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + newPt := metricdata.DataPoint[N]{ + Attributes: val.attrs, + StartTime: s.start, + Time: t, + Value: val.n.load(), + } + collectExemplars(&newPt.Exemplars, val.res.Collect) + dPts = append(dPts, newPt) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. i++ - } + return true + }) sData.DataPoints = dPts *dest = sData - return n + return i } // newPrecomputedSum returns an aggregator that summarizes a set of @@ -153,27 +194,22 @@ func newPrecomputedSum[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *precomputedSum[N] { return &precomputedSum[N]{ - valueMap: newValueMap[N](limit, r), - monotonic: monotonic, - start: now(), + deltaSum: newDeltaSum(monotonic, limit, r), } } // precomputedSum summarizes a set of observations as their arithmetic sum. type precomputedSum[N int64 | float64] struct { - *valueMap[N] + *deltaSum[N] - monotonic bool - start time.Time - - reported map[attribute.Distinct]N + reported map[any]N } func (s *precomputedSum[N]) delta( dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface ) int { t := now() - newReported := make(map[attribute.Distinct]N) + newReported := make(map[any]N) // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. @@ -181,27 +217,29 @@ func (s *precomputedSum[N]) delta( sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // delta always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for key, value := range s.values { - delta := value.n - s.reported[key] + s.hotColdValMap[readIdx].values.Range(func(key, value any) bool { + val := value.(*sumValue[N]) + n := val.n.load() - dPts[i].Attributes = value.attrs + delta := n - s.reported[key] + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta - collectExemplars(&dPts[i].Exemplars, value.res.Collect) - - newReported[key] = value.n + newReported[key] = n i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() s.reported = newReported // The delta collection cycle resets. 
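Because the delta storage above is swapped and cleared on every collection, back-to-back collects report only what was recorded in between. A sketch against the public API, selecting delta temporality on a manual reader:

package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(
			func(sdkmetric.InstrumentKind) metricdata.Temporality {
				return metricdata.DeltaTemporality
			},
		),
	)
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = mp.Shutdown(ctx) }()

	counter, _ := mp.Meter("example").Int64Counter("requests")
	counter.Add(ctx, 5)

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm) // first collect reports the delta of 5

	counter.Add(ctx, 2)
	_ = reader.Collect(ctx, &rm) // second collect reports only the new 2
	sum := rm.ScopeMetrics[0].Metrics[0].Data.(metricdata.Sum[int64])
	fmt.Println(sum.DataPoints[0].Value) // 2
}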
s.start = t @@ -209,7 +247,7 @@ func (s *precomputedSum[N]) delta( sData.DataPoints = dPts *dest = sData - return n + return i } func (s *precomputedSum[N]) cumulative( @@ -223,27 +261,28 @@ func (s *precomputedSum[N]) cumulative( sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // cumulative precomputed always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for _, val := range s.values { + s.hotColdValMap[readIdx].values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = val.n - collectExemplars(&dPts[i].Exemplars, val.res.Collect) - + dPts[i].Value = val.n.load() i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() sData.DataPoints = dPts *dest = sData - return n + return i } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go new file mode 100644 index 00000000000..41cfc6bcbef --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// metric reader. +package observ // import "go.opentelemetry.io/otel/sdk/metric/internal/observ" + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/sdk/metric/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 // error.type + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the metric reader with the +// provided ComponentType and ID. +func ComponentName(componentType string, id int64) string { + return fmt.Sprintf("%s/%d", componentType, id) +} + +// Instrumentation is experimental instrumentation for the metric reader. 
+type Instrumentation struct { + colDuration metric.Float64Histogram + + attrs []attribute.KeyValue + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for a metric reader with the provided component +// type (such as the periodic or manual metric reader) and ID. It uses the global +// MeterProvider to create the instrumentation. +// +// The id should be the unique metric reader instance ID. It is used +// to set the "component.name" attribute. +// +// If the experimental observability feature is disabled, nil is returned. +func NewInstrumentation(componentType string, id int64) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{ + attrs: []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(componentType, id)), + semconv.OTelComponentTypeKey.String(componentType), + }, + } + + r := attribute.NewSet(i.attrs...) + i.recOpt = metric.WithAttributeSet(r) + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + colDuration, err := otelconv.NewSDKMetricReaderCollectionDuration(meter) + if err != nil { + err = fmt.Errorf("failed to create collection duration metric: %w", err) + } + i.colDuration = colDuration.Inst() + + return i, err +} + +// CollectMetrics instruments the collect method of the metric reader. It returns a +// [CollectOp] that must have its [CollectOp.End] method called when the +// collection ends. +func (i *Instrumentation) CollectMetrics(ctx context.Context) CollectOp { + start := time.Now() + + return CollectOp{ + ctx: ctx, + start: start, + inst: i, + } +} + +// CollectOp tracks the collect operation being observed by +// [Instrumentation.CollectMetrics]. +type CollectOp struct { + ctx context.Context + start time.Time + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.CollectMetrics]. +// +// Any error that is encountered is provided as err. +func (e CollectOp) End(err error) { + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err)) + + d := time.Since(e.start).Seconds() + e.inst.colDuration.Record(e.ctx, d, *recOpt...) +} + +// recordOption returns a RecordOption with attributes representing the +// outcome of the collection being recorded. +// +// If err is nil, the default recOpt of the Instrumentation is returned. +// +// Otherwise, a new RecordOption is returned with the base attributes of the +// Instrumentation plus the error.type attribute set to the type of the error. +func (i *Instrumentation) recordOption(err error) metric.RecordOption { + if err == nil { + return i.recOpt + } + + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, i.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using WithAttributes + // instead of WithAttributeSet.
+ return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go new file mode 100644 index 00000000000..3be234a4108 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" + +// ConcurrentSafe is an interface that can be embedded in an +// exemplar.Reservoir to indicate to the SDK that it is safe to invoke its +// methods concurrently. If this interface is not embedded, the SDK assumes it +// is not safe to call concurrently and locks around Reservoir methods. This +// is currently only used by the built-in reservoirs. +type ConcurrentSafe interface{ concurrentSafe() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go new file mode 100644 index 00000000000..6cd213b5f32 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package reservoir contains experimental features used by built-in exemplar +// reservoirs which require coordination with the metrics SDK. +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 85d3dc20768..5b0630207b5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -10,10 +10,18 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) +const ( + // manualReaderType uniquely identifies the OpenTelemetry Metric Reader component + // being instrumented. + manualReaderType = "go.opentelemetry.io/otel/sdk/metric/metric.ManualReader" +) + // ManualReader is a simple Reader that allows an application to // read metrics on demand. type ManualReader struct { @@ -26,6 +34,8 @@ type ManualReader struct { temporalitySelector TemporalitySelector aggregationSelector AggregationSelector + + inst *observ.Instrumentation } // Compile time check the manualReader implements Reader and is comparable. @@ -39,9 +49,24 @@ func NewManualReader(opts ...ManualReaderOption) *ManualReader { aggregationSelector: cfg.aggregationSelector, } r.externalProducers.Store(cfg.producers) + + var err error + r.inst, err = observ.NewInstrumentation(manualReaderType, nextManualReaderID()) + if err != nil { + otel.Handle(err) + } + return r } + +var manualReaderIDCounter atomic.Int64 + +// nextManualReaderID returns an identifier for this manual reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextManualReaderID() int64 { + return manualReaderIDCounter.Add(1) - 1 +} + // register stores the sdkProducer which enables the caller // to read metrics from the SDK on demand.
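Because concurrentSafe is unexported, only reservoirs inside the SDK can satisfy this interface, and they do so by embedding it as a marker. A hypothetical sketch of how a built-in reservoir would opt out of SDK locking:

    type lockFreeReservoir struct {
        reservoir.ConcurrentSafe // marker only: tells the SDK its methods may be called concurrently

        // ... state guarded by the reservoir's own synchronization ...
    }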
func (mr *ManualReader) register(p sdkProducer) { @@ -93,12 +118,20 @@ func (mr *ManualReader) Shutdown(context.Context) error { // // This method is safe to call concurrently. func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + var err error + if mr.inst != nil { + cp := mr.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if rm == nil { - return errors.New("manual reader: *metricdata.ResourceMetrics is nil") + err = errors.New("manual reader: *metricdata.ResourceMetrics is nil") + return err } p := mr.sdkProducer.Load() if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -107,11 +140,11 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. - err := fmt.Errorf("manual reader: invalid producer: %T", p) + err = fmt.Errorf("manual reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go index 4da833cdce2..129cc6430dc 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go @@ -18,8 +18,9 @@ const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTempora var _Temporality_index = [...]uint8{0, 20, 41, 57} func (i Temporality) String() string { - if i >= Temporality(len(_Temporality_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Temporality_index)-1 { return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] + return _Temporality_name[_Temporality_index[idx]:_Temporality_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index f08c771a68f..e78402afc40 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -13,7 +13,9 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // Default periodic reader timing. @@ -126,9 +128,26 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri r.run(ctx, conf.interval) }() + var err error + r.inst, err = observ.NewInstrumentation( + semconv.OTelComponentTypePeriodicMetricReader.Value.AsString(), + nextPeriodicReaderID(), + ) + if err != nil { + otel.Handle(err) + } + return r } +var periodicReaderIDCounter atomic.Int64 + +// nextPeriodicReaderID returns an identifier for this periodic reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextPeriodicReaderID() int64 { + return periodicReaderIDCounter.Add(1) - 1 +} + // PeriodicReader is a Reader that continuously collects and exports metric // data at a set interval. 
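For reference, the Collect path above is what runs when an application drives a manual reader directly; a minimal usage sketch (ctx is assumed):

    reader := NewManualReader()
    provider := NewMeterProvider(WithReader(reader))
    defer func() { _ = provider.Shutdown(ctx) }()

    var rm metricdata.ResourceMetrics
    if err := reader.Collect(ctx, &rm); err != nil {
        otel.Handle(err)
    }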
type PeriodicReader struct { @@ -148,6 +167,8 @@ type PeriodicReader struct { shutdownOnce sync.Once rmPool sync.Pool + + inst *observ.Instrumentation } // Compile time check the periodicReader implements Reader and is comparable. @@ -235,8 +256,15 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet // collect unwraps p as a produceHolder and returns its produce results. func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error { + var err error + if r.inst != nil { + cp := r.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -245,11 +273,11 @@ func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.Reso // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. - err := fmt.Errorf("periodic reader: invalid producer: %T", p) + err = fmt.Errorf("periodic reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 5c1cea8254e..7b205c736c2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -127,10 +127,40 @@ type TemporalitySelector func(InstrumentKind) metricdata.Temporality // DefaultTemporalitySelector is the default TemporalitySelector used if // WithTemporalitySelector is not provided. CumulativeTemporality will be used // for all instrument kinds if this TemporalitySelector is used. -func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { +func DefaultTemporalitySelector(k InstrumentKind) metricdata.Temporality { + return CumulativeTemporalitySelector(k) +} + +// CumulativeTemporalitySelector is the TemporalitySelector that uses +// a cumulative temporality for all instrument kinds. +func CumulativeTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } +// DeltaTemporalitySelector is the TemporalitySelector that uses +// a delta temporality for these instrument kinds: counter, histogram, and observable counter. +// All other instruments use cumulative temporality. +func DeltaTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +// LowMemoryTemporalitySelector is the TemporalitySelector that uses +// delta temporality for counters and histograms. All other instruments use +// cumulative temporality. +func LowMemoryTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + // AggregationSelector selects the aggregation and the parameters to use for // that aggregation based on the InstrumentKind.
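A reader opts into one of these selectors through its options, using the existing WithTemporalitySelector option from this package. For example, with the manual reader shown earlier in this patch:

    reader := NewManualReader(
        WithTemporalitySelector(DeltaTemporalitySelector),
    )
    _ = reader // counters, histograms, and observable counters now report deltas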
// diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index dd9051a76c5..ae5b57b191e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index cc8b8938ed5..4c1c30f2560 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index f84f173240f..4a26096c8d0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build linux -// +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index df12c44c564..63ad2fa4e05 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d7da..2b8ca20b381 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build windows -// +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index 7252af79fc9..a1763267c22 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index a6ff26a4d27..6c50ab68677 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -2,7 +2,6 @@ // 
SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index a77742b0771..25f629532a3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 9bc3e525d19..7d15cbb9c0f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -6,20 +6,14 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" - "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" - "go.opentelemetry.io/otel/sdk/internal/env" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" ) @@ -33,8 +27,6 @@ const ( DefaultMaxExportBatchSize = 512 ) -var queueFull = otelconv.ErrorTypeAttr("queue_full") - // BatchSpanProcessorOption configures a BatchSpanProcessor. 
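The "// +build" deletions running through these resource files are safe cleanup: that legacy comment form is only consulted by Go toolchains older than 1.17, and every file keeps its "//go:build" constraint, which is sufficient on its own. For example, the Linux host-ID file's header reduces to:

    //go:build linux

    package resource // import "go.opentelemetry.io/otel/sdk/resource"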
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -78,10 +70,7 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 - selfObservabilityEnabled bool - callbackRegistration metric.Registration - spansProcessedCounter otelconv.SDKProcessorSpanProcessed - componentNameAttr attribute.KeyValue + inst *observ.BSP batch []ReadOnlySpan batchMutex sync.Mutex @@ -124,19 +113,14 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } - if x.SelfObservability.Enabled() { - bsp.selfObservabilityEnabled = true - bsp.componentNameAttr = componentName() - - var err error - bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( - bsp.componentNameAttr, - func() int64 { return int64(len(bsp.queue)) }, - int64(bsp.o.MaxQueueSize), - ) - if err != nil { - otel.Handle(err) - } + var err error + bsp.inst, err = observ.NewBSP( + nextProcessorID(), + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) } bsp.stopWait.Add(1) @@ -157,51 +141,6 @@ func nextProcessorID() int64 { return processorIDCounter.Add(1) - 1 } -func componentName() attribute.KeyValue { - id := nextProcessorID() - name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) - return semconv.OTelComponentName(name) -} - -// newBSPObs creates and returns a new set of metrics instruments and a -// registration for a BatchSpanProcessor. It is the caller's responsibility -// to unregister the registration when it is no longer needed. -func newBSPObs( - cmpnt attribute.KeyValue, - qLen func() int64, - qMax int64, -) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { - meter := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) - - qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) - err = errors.Join(err, e) - - spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) - err = errors.Join(err, e) - - cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor - attrs := metric.WithAttributes(cmpnt, cmpntT) - - reg, e := meter.RegisterCallback( - func(_ context.Context, o metric.Observer) error { - o.ObserveInt64(qSize.Inst(), qLen(), attrs) - o.ObserveInt64(qCap.Inst(), qMax, attrs) - return nil - }, - qSize.Inst(), - qCap.Inst(), - ) - err = errors.Join(err, e) - - return spansProcessed, reg, err -} - // OnStart method does nothing. 
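The deleted newBSPObs above is replaced by observ.NewBSP (the new file appears later in this patch), but the underlying pattern is unchanged: one registered callback observes both queue gauges so their values are sampled together. Reduced to its core, with meter, qLen, and qMax assumed and the metric names abbreviated:

    qSize, _ := meter.Int64ObservableUpDownCounter("queue.size")
    qCap, _ := meter.Int64ObservableUpDownCounter("queue.capacity")

    reg, err := meter.RegisterCallback(
        func(_ context.Context, o metric.Observer) error {
            o.ObserveInt64(qSize, qLen()) // current queue length
            o.ObserveInt64(qCap, qMax)    // configured capacity
            return nil
        },
        qSize, qCap,
    )
    if err != nil {
        otel.Handle(err)
    }
    _ = reg // reg.Unregister() must be called at shutdown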
func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -242,8 +181,8 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } - if bsp.selfObservabilityEnabled { - err = errors.Join(err, bsp.callbackRegistration.Unregister()) + if bsp.inst != nil { + err = errors.Join(err, bsp.inst.Shutdown()) } }) return err @@ -357,10 +296,8 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, int64(l), - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + if bsp.inst != nil { + bsp.inst.Processed(ctx, int64(l)) } err := bsp.e.ExportSpans(ctx, bsp.batch) @@ -470,11 +407,8 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } return false } @@ -490,11 +424,8 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) return true default: atomic.AddUint32(&bsp.dropped, 1) - if bsp.selfObservabilityEnabled { - bsp.spansProcessedCounter.Add(ctx, 1, - bsp.componentNameAttr, - bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), - bsp.spansProcessedCounter.AttrErrorType(queueFull)) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) } } return false diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index e58e7f6ed78..b502c7d4798 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -7,7 +7,7 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. -See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +See [go.opentelemetry.io/otel/sdk/internal/x] for information about the experimental features. */ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go index e3309231d42..58f68df4417 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go @@ -3,7 +3,7 @@ // Package env provides types and functionality for environment variable support // in the OpenTelemetry SDK. 
-package env // import "go.opentelemetry.io/otel/sdk/internal/env" +package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" import ( "os" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go new file mode 100644 index 00000000000..bd7fe236296 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" + + // SchemaURL is the schema URL of the instrumentation. + SchemaURL = semconv.SchemaURL +) + +// ErrQueueFull is the attribute value for the "queue_full" error type. +var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( + otelconv.ErrorTypeAttr("queue_full"), +) + +// BSPComponentName returns the component name attribute for a +// BatchSpanProcessor with the given ID. +func BSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeBatchingSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. +type BSP struct { + reg metric.Registration + + processed metric.Int64Counter + processedOpts []metric.AddOption + processedQueueFullOpts []metric.AddOption +} + +func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + if err != nil { + err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) + } + qCapInst := qCap.Inst() + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP queue size metric: %w", e) + err = errors.Join(err, e) + } + qSizeInst := qSize.Inst() + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + cmpnt := BSPComponentName(id) + set := attribute.NewSet(cmpnt, cmpntT) + + obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSizeInst, qLen(), obsOpts...) + o.ObserveInt64(qCapInst, qMax, obsOpts...) 
+ return nil + }, + qSizeInst, + qCapInst, + ) + if e != nil { + e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) + err = errors.Join(err, e) + } + + processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) + err = errors.Join(err, e) + } + processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) + processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + return &BSP{ + reg: reg, + processed: processed.Inst(), + processedOpts: processedOpts, + processedQueueFullOpts: processedQueueFullOpts, + }, err +} + +func (b *BSP) Shutdown() error { return b.reg.Unregister() } + +func (b *BSP) Processed(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedOpts...) +} + +func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedQueueFullOpts...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go new file mode 100644 index 00000000000..b542121e6a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel trace SDK +// package. +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go new file mode 100644 index 00000000000..7d33870613a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. +type SSP struct { + spansProcessedCounter metric.Int64Counter + addOpts []metric.AddOption + attrs []attribute.KeyValue +} + +// SSPComponentName returns the component name attribute for a +// SimpleSpanProcessor with the given ID. +func SSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. 
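From the batch processor's perspective (see its rewritten constructor earlier in this patch), the contract of this type is: a nil *BSP means observability is disabled, a non-nil error can accompany a partially usable value, and Shutdown must be called to unregister the queue callback. A condensed sketch with queue, maxQueue, and ctx assumed:

    inst, err := NewBSP(0, func() int64 { return int64(len(queue)) }, int64(maxQueue))
    if err != nil {
        otel.Handle(err)
    }
    if inst != nil {
        defer func() { _ = inst.Shutdown() }() // unregisters the queue-size/capacity callback
        inst.Processed(ctx, 512)               // spans handed to the exporter
        inst.ProcessedQueueFull(ctx, 1)        // a drop, recorded with error.type "queue_full"
    }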
+func NewSSP(id int64) (*SSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) + if err != nil { + err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) + } + + componentName := SSPComponentName(id) + componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) + attrs := []attribute.KeyValue{componentName, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SSP{ + spansProcessedCounter: spansProcessedCounter.Inst(), + addOpts: addOpts, + attrs: attrs, + }, err +} + +// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. +// If err is non-nil, it records the processing error as an attribute. +func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { + ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) +} + +func (ssp *SSP) addOption(err error) []metric.AddOption { + if err == nil { + return ssp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, ssp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go new file mode 100644 index 00000000000..a8a16458981 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/trace" +) + +var meterOpts = []metric.MeterOption{ + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), +} + +// Tracer is instrumentation for an OTel SDK Tracer. +type Tracer struct { + enabled bool + + live metric.Int64UpDownCounter + started metric.Int64Counter +} + +func NewTracer() (Tracer, error) { + if !x.Observability.Enabled() { + return Tracer{}, nil + } + meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) 
+ + var err error + l, e := otelconv.NewSDKSpanLive(meter) + if e != nil { + e = fmt.Errorf("failed to create span live metric: %w", e) + err = errors.Join(err, e) + } + + s, e := otelconv.NewSDKSpanStarted(meter) + if e != nil { + e = fmt.Errorf("failed to create span started metric: %w", e) + err = errors.Join(err, e) + } + + return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err +} + +func (t Tracer) Enabled() bool { return t.enabled } + +func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + key := spanStartedKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + opts := spanStartedOpts[key] + t.started.Add(ctx, 1, opts...) +} + +func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { + t.spanLive(ctx, 1, span) +} + +func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { + t.spanLive(ctx, -1, span) +} + +func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} + opts := spanLiveOpts[key] + t.live.Add(ctx, value, opts...) +} + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedKey struct { + parent parentState + sampling samplingState +} + +var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ + { + parentStateNoParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateLocalParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateRemoteParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + + { + parentStateNoParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateLocalParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + 
}, + + { + parentStateNoParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateLocalParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, +} + +type spanLiveKey struct { + sampled bool +} + +var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ + {true}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + )), + }, + {false}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + )), + }, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 37ce2ac876a..d2cf4ebd3e7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -5,29 +5,21 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" - "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - "go.opentelemetry.io/otel/sdk/trace/internal/x" - semconv "go.opentelemetry.io/otel/semconv/v1.37.0" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" - selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" -) +const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" // tracerProviderConfig. 
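spanStartedOpts and spanLiveOpts above both apply the same optimization: every attribute combination is enumerated once at package init, so the per-span hot path is a map lookup plus an Add with a prebuilt option slice, with no allocation. The essence of the pattern, using an illustrative (non-semconv) attribute key:

    type liveKey struct{ sampled bool }

    var liveOpts = map[liveKey][]metric.AddOption{
        {true}:  {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("span.sampled", true)))},
        {false}: {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("span.sampled", false)))},
    }

    func recordLive(ctx context.Context, c metric.Int64UpDownCounter, delta int64, sampled bool) {
        c.Add(ctx, delta, liveOpts[liveKey{sampled}]...) // no per-call attribute allocation
    }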
type tracerProviderConfig struct { @@ -163,19 +155,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, - selfObservabilityEnabled: x.SelfObservability.Enabled(), + provider: p, + instrumentationScope: is, } - if t.selfObservabilityEnabled { - var err error - t.spanLiveMetric, t.spanStartedMetric, err = newInst() - if err != nil { - msg := "failed to create self-observability metrics for tracer: %w" - err := fmt.Errorf(msg, err) - otel.Handle(err) - } + + var err error + t.inst, err = observ.NewTracer() + if err != nil { + otel.Handle(err) } + p.namedTracer[is] = t } return t, ok @@ -201,23 +190,6 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { - m := otel.GetMeterProvider().Meter( - selfObsScopeName, - metric.WithInstrumentationVersion(sdk.Version()), - metric.WithSchemaURL(semconv.SchemaURL), - ) - - var err error - spanLiveMetric, e := otelconv.NewSDKSpanLive(m) - err = errors.Join(err, e) - - spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) - err = errors.Join(err, e) - - return spanLiveMetric, spanStartedMetric, err -} - // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 411d9ccdd78..771e427a4c5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" + "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" + "go.opentelemetry.io/otel/trace" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all @@ -17,6 +20,8 @@ type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once + + inst *observ.SSP } var _ SpanProcessor = (*simpleSpanProcessor)(nil) @@ -33,11 +38,26 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } + + var err error + ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } +var simpleProcessorIDCounter atomic.Int64 + +// nextSimpleProcessorID returns an identifier for this simple span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextSimpleProcessorID() int64 { + return simpleProcessorIDCounter.Add(1) - 1 +} + // OnStart does nothing. 
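The warning emitted above is actionable for applications: the simple processor exports synchronously on every span end, so production setups normally attach the same exporter through the batch processor instead. For comparison (exp is an assumed SpanExporter):

    // Synchronous export on every span end; useful in tests.
    tp := NewTracerProvider(WithSyncer(exp))

    // Batched, asynchronous export; the recommended production configuration.
    tp = NewTracerProvider(WithBatcher(exp))
    _ = tp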
func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} @@ -46,11 +66,20 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() + var err error if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) + if err != nil { otel.Handle(err) } } + + if ssp.inst != nil { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) + ssp.inst.SpanProcessed(ctx, err) + } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index b376051fbb8..8cfd9f62e3f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -151,6 +151,12 @@ type recordingSpan struct { // tracer is the SDK tracer that created this span. tracer *tracer + + // origCtx is the context used when starting this span that has the + // recordingSpan instance set as the active span. If not nil, it is used + // when ending the span to ensure any metrics are recorded with a context + // containing this span without requiring an additional allocation. + origCtx context.Context } var ( @@ -158,6 +164,10 @@ var ( _ runtimeTracer = (*recordingSpan)(nil) ) +func (s *recordingSpan) setOrigCtx(ctx context.Context) { + s.origCtx = ctx +} + // SpanContext returns the SpanContext of this span. func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { @@ -496,14 +506,15 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() - if s.tracer.selfObservabilityEnabled { - defer func() { - // Add the span to the context to ensure the metric is recorded - // with the correct span context. - ctx := trace.ContextWithSpan(context.Background(), s) - set := spanLiveSet(s.spanContext.IsSampled()) - s.tracer.spanLiveMetric.AddSet(ctx, -1, set) - }() + if s.tracer.inst.Enabled() { + ctx := s.origCtx + if ctx == nil { + // This should not happen as the origCtx should be set, but + // ensure trace information is propagated in the case of an + // error. 
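The ContextWithSpanContext call above is the key detail: OnEnd receives no caller context, so the processor rebuilds one from the ended span before recording, which lets exemplar sampling attach the span's trace and span IDs to the measurement. The same technique in isolation, where s is the ended span and counter is an assumed instrument:

    ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext())
    counter.Add(ctx, 1) // the recorded exemplar can now reference span s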
+ ctx = trace.ContextWithSpan(context.Background(), s) + } + defer s.tracer.inst.SpanEnded(ctx, s) } sps := s.tracer.provider.getSpanProcessors() diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index bec5e209787..321d9743058 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -3,7 +3,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import "go.opentelemetry.io/otel/sdk/internal/env" +import "go.opentelemetry.io/otel/sdk/trace/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index e965c4cce86..e1d08fd4d8d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -7,9 +7,8 @@ import ( "context" "time" - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -20,9 +19,7 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope - selfObservabilityEnabled bool - spanLiveMetric otelconv.SDKSpanLive - spanStartedMetric otelconv.SDKSpanStarted + inst observ.Tracer } var _ trace.Tracer = &tracer{} @@ -53,10 +50,17 @@ func (tr *tracer) Start( s := tr.newSpan(ctx, name, &config) newCtx := trace.ContextWithSpan(ctx, s) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { + if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { + // If this is a recording span, store the original context. + // This allows later retrieval of baggage and other information + // that may have been stored in the context at span start time and + // to avoid the allocation of repeatedly calling + // trace.ContextWithSpan. + o.setOrigCtx(newCtx) + } psc := trace.SpanContextFromContext(ctx) - set := spanStartedSet(psc, s) - tr.spanStartedMetric.AddSet(newCtx, 1, set) + tr.inst.SpanStarted(newCtx, psc, s) } if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { @@ -168,12 +172,11 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) - if tr.selfObservabilityEnabled { + if tr.inst.Enabled() { // Propagate any existing values from the context with the new span to // the measurement context. 
ctx = trace.ContextWithSpan(ctx, s) - set := spanLiveSet(s.spanContext.IsSampled()) - tr.spanLiveMetric.AddSet(ctx, 1, set) + tr.inst.SpanLive(ctx, s) } return s @@ -183,112 +186,3 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } - -type parentState int - -const ( - parentStateNoParent parentState = iota - parentStateLocalParent - parentStateRemoteParent -) - -type samplingState int - -const ( - samplingStateDrop samplingState = iota - samplingStateRecordOnly - samplingStateRecordAndSample -) - -type spanStartedSetKey struct { - parent parentState - sampling samplingState -} - -var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ - {parentStateNoParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), - ), - - {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), - ), - - {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), - {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( - otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), - otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), - ), -} - -func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { - key := spanStartedSetKey{ - parent: parentStateNoParent, - sampling: samplingStateDrop, - } - - if psc.IsValid() { - if psc.IsRemote() { - key.parent = parentStateRemoteParent - } else { - key.parent = parentStateLocalParent - } - } - - if span.IsRecording() { - if span.SpanContext().IsSampled() { - key.sampling = samplingStateRecordAndSample - } else { - key.sampling = samplingStateRecordOnly - } - } - - return spanStartedSetCache[key] -} - -type spanLiveSetKey struct 
{ - sampled bool -} - -var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ - {true}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordAndSample, - ), - ), - {false}: attribute.NewSet( - otelconv.SDKSpanLive{}.AttrSpanSamplingResult( - otelconv.SpanSamplingResultRecordOnly, - ), - ), -} - -func spanLiveSet(sampled bool) attribute.Set { - key := spanLiveSetKey{sampled: sampled} - return spanLiveSetCache[key] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 7f97cc31e51..0a3b3661910 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index 58b5eddef66..f18d6e3f227 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -121,7 +121,7 @@ func hostIPNamePort(hostWithPort string) (ip, name string, port int) { if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. } - return + return ip, name, port } // EndUserAttributesFromHTTPRequest generates attributes of the diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md deleted file mode 100644 index 2de1fc3c6be..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.26.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go deleted file mode 100644 index d8dc822b263..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go +++ /dev/null @@ -1,8996 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -import "go.opentelemetry.io/otel/attribute" - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. 
More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: stable - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming - // to the "aspnetcore.diagnostics.exception.result" semantic conventions. - // It represents the aSP.NET Core exception middleware handling result - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'handled', 'unhandled' - AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") - - // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the - // "aspnetcore.routing.match_status" semantic conventions. 
It represents - // the match result - success or failure - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'success', 'failure' - AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -var ( - // Exception was handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") - // Exception was not handled by the exception handling middleware - AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") - // Exception handling was skipped because the response had started - AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") - // Exception handling didn't run because the request was aborted - AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") -) - -var ( - // Match succeeded - AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") - // Match failed - AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// Generic attributes for AWS services. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. 
It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes for AWS DynamoDB. -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. 
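As a concrete illustration (not part of this patch), the generic AWS helper can be attached to a client span as sketched below; the semconv import path and version are assumptions chosen to match this file, and the function and tracer names are hypothetical:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// annotateAWSCall records the request ID returned in the
// x-amz-request-id / x-amz-requestid response headers on a client span.
func annotateAWSCall(ctx context.Context, requestID string) {
	_, span := otel.Tracer("example").Start(ctx, "DynamoDB.GetItem",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(semconv.AWSRequestID(requestID))
}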
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. 
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. 
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// Attributes for AWS Elastic Container Service (ECS). -const ( - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID - // MUST be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is - // populated.) - // Stability: experimental - // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', - // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. 
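For illustration only (table, index, and counts are hypothetical values, and the semconv import version is assumed as above), the DynamoDB helpers compose naturally on a client span:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordDynamoDBQuery annotates a Query span with request and response details.
func recordDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBScannedCount(50), // taken from the response
	)
}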
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a - // running [ECS - // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', - // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family - // name of the [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) - // used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSTaskID returns an attribute KeyValue conforming to the -// "aws.ecs.task.id" semantic conventions. It represents the ID of a running -// ECS task. The ID MUST be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS -// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) -// used to create the ECS task. 
-func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Attributes for AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Attributes for AWS Logs. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each writes to its own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to.
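As a hedged sketch (ARNs, family, and revision are placeholders, and the semconv import version is assumed), the ECS and EKS attributes above are typically attached to the telemetry resource rather than to individual spans:

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
)

// ecsResource describes a running ECS task; in a real detector the values
// would come from the ECS task metadata endpoint.
func ecsResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSECS,
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
	)
}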
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Attributes for AWS Lambda. -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` response, where applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` response, where applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for AWS S3. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
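A brief hypothetical example (bucket, key, and upload ID values are invented; the semconv import version is assumed) showing how the S3 attributes defined above combine on a multipart upload-part span:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordS3UploadPart annotates an upload-part span of a multipart upload.
func recordS3UploadPart(span trace.Span, partNumber int) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(partNumber), // positive integer, 1..10,000
	)
}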
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// The web browser attributes -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
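To ground the browser attributes, a minimal sketch (assuming the values were forwarded from the UA client hints API by the instrumented frontend; the semconv import version is assumed):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordBrowserInfo copies client-hint data onto a server-side span.
func recordBrowserInfo(span trace.Span) {
	span.SetAttributes(
		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
		semconv.BrowserLanguage("en-US"),
		semconv.BrowserMobile(false),
		semconv.BrowserPlatform("Windows"),
	)
}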
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the - // availability zone where the resource is running. Cloud regions often - // have multiple, isolated locations known as zones to increase - // availability. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - 
// IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the -// availability zone where the resource is running. Cloud regions often have -// multiple, isolated locations known as zones to increase availability. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Attributes for CloudEvents. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), - // which uniquely identifies the event. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), - // which identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions.
It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), - // which contains a value describing the type of event related to the - // originating occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), -// which uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), -// which identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), -// which contains a value describing the type of event related to the -// originating occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// These attributes allow reporting this unit of code and therefore provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`.
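For the CloudEvents helpers just defined, a short hypothetical sketch (the event values are invented and the semconv import version is assumed):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordCloudEvent annotates an event-processing span with CloudEvents context.
func recordCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}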
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). 
-func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCPUStateKey is the attribute Key conforming to the - // "container.cpu.state" semantic conventions. It represents the CPU state - // for this data point. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'user', 'kernel' - ContainerCPUStateKey = attribute.Key("container.cpu.state") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
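A minimal sketch combining the code.* constructors above on a span (annotateCodeLocation is hypothetical; the values are the documented examples; the container attribute docs continue below):

	// annotateCodeLocation records which code unit a span represents.
	func annotateCodeLocation(span trace.Span) {
		span.SetAttributes(
			CodeNamespace("com.example.MyHTTPService"),
			CodeFunction("serveRequest"),
			CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
			CodeLineNumber(42),
		)
	}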
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -var ( - // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) - ContainerCPUStateUser = ContainerCPUStateKey.String("user") - // When CPU is used by the system (host OS) - ContainerCPUStateSystem = ContainerCPUStateKey.String("system") - // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) - ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. 
It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// This group defines the attributes used to describe telemetry in the context -// of databases. -const ( - // DBClientConnectionsPoolNameKey is the attribute Key conforming to the - // "db.client.connections.pool.name" semantic conventions. It represents - // the name of the connection pool; unique within the instrumented - // application. In case the connection pool implementation doesn't provide - // a name, instrumentation should use a combination of `server.address` and - // `server.port` attributes formatted as `server.address:server.port`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myDataSource' - DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name") - - // DBClientConnectionsStateKey is the attribute Key conforming to the - // "db.client.connections.state" semantic conventions. It represents the - // state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle' - DBClientConnectionsStateKey = attribute.Key("db.client.connections.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: If the collection name is parsed from the query, it SHOULD match - // the value provided in the query and may be qualified with the schema and - // database name. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" - // semantic conventions. It represents the name of the database, fully - // qualified within the server address and port. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'test.users' - // Note: If a database system has multiple namespace components, they - // SHOULD be concatenated (potentially using database system specific - // conventions) from most general to most specific namespace component, and - // more specific namespaces SHOULD NOT be captured without the more general - // namespaces, to ensure that "startswith" queries for the more general - // namespaces will be valid. 
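Container attributes typically describe a resource rather than a single span. A minimal sketch, assuming go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL constant (the db.namespace note continues below):

	// Build a resource describing the container this process runs in;
	// the values are the documented examples.
	res := resource.NewWithAttributes(
		SchemaURL,
		ContainerName("opentelemetry-autoconf"),
		ContainerID("a3bf90e006b2"),
		ContainerImageName("gcr.io/opentelemetry/operator"),
		ContainerRuntime("docker"),
	)
	_ = res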
- // Semantic conventions for individual database systems SHOULD document - // what `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationNameKey is the attribute Key conforming to the - // "db.operation.name" semantic conventions. It represents the name of the - // operation or command being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: It is RECOMMENDED to capture the value as provided by the - // application without attempting to do any case normalization. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey - // "WuValue"' - DBQueryTextKey = attribute.Key("db.query.text") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents the database management system (DBMS) product - // as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual DBMS may differ from the one identified by the client. - // For example, when using PostgreSQL client libraries to connect to a - // CockroachDB, the `db.system` is set to `postgresql` based on the - // instrumentation's best knowledge. - DBSystemKey = attribute.Key("db.system") -) - -var ( - // idle - DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") - // used - DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") -) - -var ( - // Some other SQL database. Fallback only. 
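A minimal sketch of a database client span using the db.* Keys above directly (span is an assumed active trace.Span; the db.system enum members and the helper constructors follow below):

	span.SetAttributes(
		DBSystemKey.String("postgresql"), // or the DBSystemPostgreSQL member defined below
		DBNamespaceKey.String("customers"),
		DBCollectionNameKey.String("public.users"),
		DBOperationNameKey.String("SELECT"),
		DBQueryTextKey.String("SELECT * FROM wuser_table where username = ?"),
	)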
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to -// the "db.client.connections.pool.name" semantic conventions. It represents -// the name of the connection pool; unique within the instrumented application. -// In case the connection pool implementation doesn't provide a name, -// instrumentation should use a combination of `server.address` and -// `server.port` attributes formatted as `server.address:server.port`. -func DBClientConnectionsPoolName(val string) attribute.KeyValue { - return DBClientConnectionsPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the -// "db.namespace" semantic conventions. It represents the name of the database, -// fully qualified within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the -// "db.query.text" semantic conventions. It represents the database query being -// executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// This group defines attributes for Cassandra. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. 
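Before the final helper of the Cassandra group (which follows just below), a minimal sketch combining these attributes on a query span (span is an assumed active trace.Span; the values are the documented examples):

	span.SetAttributes(
		DBSystemCassandra,
		DBCassandraConsistencyLevelLocalQuorum, // enum members are ready-made KeyValues
		DBCassandraCoordinatorDC("us-west-2"),
		DBCassandraIdempotence(true),
		DBCassandraPageSize(5000),
	)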
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
-	return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// This group defines attributes for Azure Cosmos DB.
-const (
-	// DBCosmosDBClientIDKey is the attribute Key conforming to the
-	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-	// Cosmos DB client instance ID.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
-	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
-	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
-	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
-	// Cosmos DB client connection mode.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
-	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
-	// "db.cosmosdb.operation_type" semantic conventions. It represents the
-	// Cosmos DB operation type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
-	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
-	// "db.cosmosdb.request_charge" semantic conventions. It represents the
-	// request units (RU) consumed for that operation.
-	//
-	// Type: double
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 46.18, 1.0
-	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
-	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
-	// "db.cosmosdb.request_content_length" semantic conventions. It represents
-	// the request payload size in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
-	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
-	// "db.cosmosdb.status_code" semantic conventions. It represents the
-	// Cosmos DB status code.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 200, 201
-	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
-	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
-	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
-	// Cosmos DB sub-status code.
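A minimal sketch using the Cosmos DB Keys above directly (their helper constructors appear further below; span is an assumed active trace.Span; the sub-status key's metadata continues below):

	span.SetAttributes(
		DBCosmosDBClientIDKey.String("3ba4827d-4422-483f-b59f-85b74211c11d"),
		DBCosmosDBStatusCodeKey.Int(200),
		DBCosmosDBRequestChargeKey.Float64(46.18), // request units consumed
	)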
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// This group defines attributes for Elasticsearch. -const ( - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. 
It represents the
-	// identifier of an Elasticsearch cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
-	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
-	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.node.name" semantic conventions. It represents the
-	// human-readable identifier of the node/instance to which a request was
-	// routed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-0000000001'
-	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
-	return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a request was
-// routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
-	return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
-	// DeploymentEnvironmentKey is the attribute Key conforming to the
-	// "deployment.environment" semantic conventions. It represents the name of
-	// the [deployment
-	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
-	// deployment tier).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'staging', 'production'
-	// Note: `deployment.environment` does not affect the uniqueness
-	// constraints defined through
-	// the `service.namespace`, `service.name` and `service.instance.id`
-	// resource attributes.
-	// This implies that resources carrying the following attribute
-	// combinations MUST be
-	// considered to be identifying the same service:
-	//
-	// * `service.name=frontend`, `deployment.environment=production`
-	// * `service.name=frontend`, `deployment.environment=staging`.
-	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
-	return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represent an occurrence of a lifecycle transition on the
-// Android platform.
-const (
-	// AndroidStateKey is the attribute Key conforming to the "android.state"
-	// semantic conventions. It is deprecated; use the `device.app.lifecycle`
-	// event definition, including `android.state` as a payload field, instead.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The Android lifecycle states are defined in [Activity lifecycle
-	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
-	// from which the `OS identifiers` are derived.
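A minimal sketch attaching the deployment environment above to a resource (assumes go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL constant; the android.state declaration follows below):

	// Mark all telemetry from this process as coming from staging.
	res := resource.NewWithAttributes(
		SchemaURL,
		DeploymentEnvironment("staging"),
	)
	_ = res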
- AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. 
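A minimal sketch for the destination attributes above (span is an assumed active trace.Span; the values are the documented examples; the device.id note continues below):

	// Describe the receiver of a network exchange when there is no clear
	// client/server relationship.
	span.SetAttributes(
		DestinationAddress("destination.example.com"),
		DestinationPort(3389),
	)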
On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// These attributes may be used for any disk related operation. 
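A minimal sketch for the device attributes above (the disk const block follows below; assumes go.opentelemetry.io/otel/sdk/resource and this package's SchemaURL constant; the values are the documented examples):

	// Describe the physical device the app is running on.
	res := resource.NewWithAttributes(
		SchemaURL,
		DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		DeviceManufacturer("Samsung"),
		DeviceModelIdentifier("SM-G920F"),
		DeviceModelName("Samsung Galaxy S6"),
	)
	_ = res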
-const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report a DNS query. -const ( - // DNSQuestionNameKey is the attribute Key conforming to the - // "dns.question.name" semantic conventions. It represents the name being - // queried. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'www.example.com', 'opentelemetry.io' - // Note: If the name field contains non-printable characters (below 32 or - // above 126), those characters should be represented as escaped base 10 - // integers (\DDD). Back slashes and quotes should be escaped. Tabs, - // carriage returns, and line feeds should be converted to \t, \r, and \n - // respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Attributes for operations with an authenticated and/or authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. 
-func EnduserID(val string) attribute.KeyValue {
-	return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from the token or
-// application security context.
-func EnduserRole(val string) attribute.KeyValue {
-	return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
-	return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
-	// ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the operation ended
-	// with.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'timeout', 'java.net.UnknownHostException',
-	// 'server_certificate_invalid', '500'
-	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
-	// cardinality.
-	//
-	// When `error.type` is set to a type (e.g., an exception type), its
-	// canonical class name identifying the type within the artifact SHOULD be
-	// used.
-	//
-	// Instrumentations SHOULD document the list of errors they report.
-	//
-	// The cardinality of `error.type` within one instrumentation library
-	// SHOULD be low.
-	// Telemetry consumers that aggregate data from multiple instrumentation
-	// libraries and applications should be prepared for `error.type` to have
-	// high cardinality at query time when no additional filters are applied.
-	//
-	// If the operation has completed successfully, instrumentations SHOULD NOT
-	// set `error.type`.
-	//
-	// If a specific domain defines its own set of error identifiers (such as
-	// HTTP or gRPC status codes), it's RECOMMENDED to:
-	//
-	// * Use a domain-specific attribute
-	// * Set `error.type` to capture all errors, regardless of whether they are
-	// defined within the domain-specific set or not.
-	ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
-	// A fallback error value to be used when the instrumentation doesn't define a custom value
-	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// Attributes for Events represented using Log Records.
-const (
-	// EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It identifies the class / type of the event.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
-	// Note: Event names are subject to the same rules as [attribute
-	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
-	// Notably, event names are namespaced to avoid collisions and provide a
-	// clean separation of semantics for events in separate domains like
-	// browser, mobile, and kubernetes.
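To make the error.type guidance above concrete, a minimal sketch of a hypothetical helper that prefers a known, low-cardinality type and falls back to the ErrorTypeOther member (errors and net are standard-library imports; the event.name declaration continues below):

	// errorTypeAttr maps an error to a predictable error.type value.
	func errorTypeAttr(err error) attribute.KeyValue {
		var dnsErr *net.DNSError
		if errors.As(err, &dnsErr) {
			// A domain-specific, low-cardinality type name.
			return ErrorTypeKey.String("net.DNSError")
		}
		// Fallback for errors outside the documented set.
		return ErrorTypeOther
	}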
-	EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of the event.
-func EventName(val string) attribute.KeyValue {
-	return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
-	// ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Note: An exception is considered to have escaped (or left) the scope of
-	// a span if that span is ended while the exception is still logically "in
-	// flight". This may be actually "in flight" in some languages (e.g. if the
-	// exception is passed to a Context manager's `__exit__` method in Python)
-	// but will usually be caught at the point of recording the exception in
-	// most languages.
-	//
-	// It is usually not possible to determine at the point where an exception
-	// is thrown whether it will escape the scope of a span.
-	// However, it is trivial to know that an exception will escape, if one
-	// checks for an active exception just before ending the span,
-	// as done in the [example for recording span
-	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
-	//
-	// It follows that an exception may still escape the scope of the span
-	// even if the `exception.escaped` attribute was not set or set to false,
-	// since the event might have been recorded at a time where it was not
-	// clear whether the exception will escape.
-	ExceptionEscapedKey = attribute.Key("exception.escaped")
-
-	// ExceptionMessageKey is the attribute Key conforming to the
-	// "exception.message" semantic conventions. It represents the exception
-	// message.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Division by zero', "Can't convert 'int' object to str
-	// implicitly"
-	ExceptionMessageKey = attribute.Key("exception.message")
-
-	// ExceptionStacktraceKey is the attribute Key conforming to the
-	// "exception.stacktrace" semantic conventions. It represents a stacktrace
-	// as a string in the natural representation for the language runtime. The
-	// representation is to be determined and documented by each language SIG.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
-	// exception\\n at '
-	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
-	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
-	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
-	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
-	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
-	// semantic conventions. It represents the type of the exception (its
-	// fully-qualified class name, if applicable). The dynamic type of the
-	// exception should be preferred over the static type in languages that
-	// support it.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'java.net.ConnectException', 'OSError'
-	ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It SHOULD be set to true if the
-// exception event is recorded at a point where it is known that the exception
-// is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
-	return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
-	return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
-	return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
-	return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
-	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
-	// semantic conventions. It represents a boolean that is true if the
-	// serverless function is executed for the first time (aka cold-start).
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	FaaSColdstartKey = attribute.Key("faas.coldstart")
-
-	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
-	// conventions. It represents a string containing the schedule period as
-	// [Cron
-	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0/5 * * * ? *'
-	FaaSCronKey = attribute.Key("faas.cron")
-
-	// FaaSDocumentCollectionKey is the attribute Key conforming to the
-	// "faas.document.collection" semantic conventions. It represents the name
-	// of the source on which the triggering operation was performed. For
-	// example, in Cloud Storage or S3 this corresponds to the bucket name, and
-	// in Cosmos DB to the database name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myBucketName', 'myDBName'
-	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
-	// FaaSDocumentNameKey is the attribute Key conforming to the
-	// "faas.document.name" semantic conventions. It represents the document
-	// name/table subjected to the operation. For example, in Cloud Storage or
-	// S3 this is the name of the file, and in Cosmos DB the table name.
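A minimal sketch of recording the exception attributes above as an "exception" span event (recordException is hypothetical; trace and fmt are assumed imports; the faas.document.name metadata continues below):

	// recordException attaches exception attributes to a span event.
	func recordException(span trace.Span, err error, escaped bool) {
		span.AddEvent("exception", trace.WithAttributes(
			ExceptionType(fmt.Sprintf("%T", err)),
			ExceptionMessage(err.Error()),
			ExceptionEscaped(escaped),
		))
	}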
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes.
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. 
Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. 
It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Attributes for Feature Flags. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It SHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used.
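As a usage illustration of the FaaS helpers and enum values above, here is a minimal sketch; the tracer name and semconv import version are assumptions, and the invocation ID echoes the documented example value.

    package example

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
    )

    func handleInvocation(ctx context.Context, cold bool) {
        ctx, span := otel.Tracer("faas-example").Start(ctx, "my-function")
        defer span.End()

        span.SetAttributes(
            semconv.FaaSTriggerHTTP,     // enum: answering an inbound HTTP request
            semconv.FaaSColdstart(cold), // true only on the first execution
            semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
        )
    }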
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` may be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It SHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// Describes file attributes. -const ( - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is - // located. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/user', 'C:\\Program Files\\MyApp' - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the - // leading dot. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'png', 'gz' - // Note: When the file name has multiple extensions (example.tar.gz), only - // the last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'example.png' - FileNameKey = attribute.Key("file.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/home/alice/example.png', 'C:\\Program - // Files\\MyApp\\myapp.exe' - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes.
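A short sketch of the feature-flag helpers in use; recording the evaluation as a span event named "feature_flag" follows the feature-flag event convention, and the values mirror the documented examples. The semconv import version is an assumption.

    package example

    import (
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
        "go.opentelemetry.io/otel/trace"
    )

    func recordEvaluation(span trace.Span) {
        span.AddEvent("feature_flag", trace.WithAttributes(
            semconv.FeatureFlagKey("logo-color"),
            semconv.FeatureFlagProviderName("Flag Manager"),
            semconv.FeatureFlagVariant("red"), // variant standing in for the value `#c05543`
        ))
    }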
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - FileSizeKey = attribute.Key("file.size") -) - -// FileDirectory returns an attribute KeyValue conforming to the -// "file.directory" semantic conventions. It represents the directory where the -// file is located. It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the -// "file.extension" semantic conventions. It represents the file extension, -// excluding the leading dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" -// semantic conventions. It represents the name of the file including the -// extension, without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" -// semantic conventions. It represents the full path to the file, including the -// file name. It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" -// semantic conventions. It represents the file size in bytes. -func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// Attributes for Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Attributes for Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// The attributes used to describe telemetry in the context of LLM (Large -// Language Models) requests and responses. -const ( - // GenAiCompletionKey is the attribute Key conforming to the - // "gen_ai.completion" semantic conventions. It represents the full - // response received from the LLM. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'assistant', 'content': 'The capital of France is - // Paris.'}]" - // Note: It's RECOMMENDED to format completions as JSON string matching - // [OpenAI messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiCompletionKey = attribute.Key("gen_ai.completion") - - // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" - // semantic conventions. It represents the full prompt sent to an LLM. 
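To show where the Cloud Run helpers above typically land, here is a minimal resource-construction sketch; reading the documented CLOUD_RUN_* environment variables and merging into the default resource are reasonable choices, and the semconv import version is assumed.

    package example

    import (
        "os"
        "strconv"

        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
    )

    func cloudRunJobResource() (*resource.Resource, error) {
        // CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX come from the Cloud Run
        // jobs container contract, as noted in the attribute docs above.
        idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX"))
        return resource.Merge(resource.Default(), resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
            semconv.GCPCloudRunJobTaskIndex(idx),
        ))
    }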
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: "[{'role': 'user', 'content': 'What is the capital of - // France?'}]" - // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI - // messages - // format](https://platform.openai.com/docs/guides/text-generation) - GenAiPromptKey = attribute.Key("gen_ai.prompt") - - // GenAiRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the - // maximum number of tokens the LLM generates for a request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAiRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of - // the LLM a request is being made to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4' - GenAiRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAiRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0.0 - GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAiRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p - // sampling setting for the LLM request. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0 - GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAiResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to - // each generation received. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'stop' - GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAiResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'chatcmpl-123' - GenAiResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAiResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of - // the LLM a response was generated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gpt-4-0613' - GenAiResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as - // identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'openai' - // Note: The actual GenAI product may differ from the one identified by the - // client. 
For example, when using OpenAI client libraries to communicate - // with Mistral, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge. - GenAiSystemKey = attribute.Key("gen_ai.system") - - // GenAiUsageCompletionTokensKey is the attribute Key conforming to the - // "gen_ai.usage.completion_tokens" semantic conventions. It represents the - // number of tokens used in the LLM response (completion). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 180 - GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") - - // GenAiUsagePromptTokensKey is the attribute Key conforming to the - // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the - // number of tokens used in the LLM prompt. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") -) - -var ( - // OpenAI - GenAiSystemOpenai = GenAiSystemKey.String("openai") -) - -// GenAiCompletion returns an attribute KeyValue conforming to the -// "gen_ai.completion" semantic conventions. It represents the full response -// received from the LLM. -func GenAiCompletion(val string) attribute.KeyValue { - return GenAiCompletionKey.String(val) -} - -// GenAiPrompt returns an attribute KeyValue conforming to the -// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to -// an LLM. -func GenAiPrompt(val string) attribute.KeyValue { - return GenAiPromptKey.String(val) -} - -// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the LLM generates for a request. -func GenAiRequestMaxTokens(val int) attribute.KeyValue { - return GenAiRequestMaxTokensKey.Int(val) -} - -// GenAiRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// LLM a request is being made to. -func GenAiRequestModel(val string) attribute.KeyValue { - return GenAiRequestModelKey.String(val) -} - -// GenAiRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the LLM request. -func GenAiRequestTemperature(val float64) attribute.KeyValue { - return GenAiRequestTemperatureKey.Float64(val) -} - -// GenAiRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p -// sampling setting for the LLM request. -func GenAiRequestTopP(val float64) attribute.KeyValue { - return GenAiRequestTopPKey.Float64(val) -} - -// GenAiResponseFinishReasons returns an attribute KeyValue conforming to -// the "gen_ai.response.finish_reasons" semantic conventions. It represents the -// array of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAiResponseFinishReasonsKey.StringSlice(val) -} - -// GenAiResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique -// identifier for the completion. 
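For orientation, a minimal sketch of annotating an LLM client span with the gen_ai.* helpers defined in this section; the values simply echo the documented examples, and the semconv import version is an assumption.

    package example

    import (
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
        "go.opentelemetry.io/otel/trace"
    )

    func annotateLLMSpan(span trace.Span) {
        span.SetAttributes(
            semconv.GenAiSystemOpenai, // enum value defined above
            semconv.GenAiRequestModel("gpt-4"),
            semconv.GenAiRequestMaxTokens(100),
            semconv.GenAiRequestTemperature(0.0),
            semconv.GenAiResponseModel("gpt-4-0613"),
            semconv.GenAiResponseID("chatcmpl-123"),
            semconv.GenAiResponseFinishReasons("stop"), // variadic, stored as a string slice
            semconv.GenAiUsagePromptTokens(100),
            semconv.GenAiUsageCompletionTokens(180),
        )
    }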
-func GenAiResponseID(val string) attribute.KeyValue { - return GenAiResponseIDKey.String(val) -} - -// GenAiResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// LLM a response was generated from. -func GenAiResponseModel(val string) attribute.KeyValue { - return GenAiResponseModelKey.String(val) -} - -// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to -// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the -// number of tokens used in the LLM response (completion). -func GenAiUsageCompletionTokens(val int) attribute.KeyValue { - return GenAiUsageCompletionTokensKey.Int(val) -} - -// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number -// of tokens used in the LLM prompt. -func GenAiUsagePromptTokens(val int) attribute.KeyValue { - return GenAiUsagePromptTokensKey.Int(val) -} - -// Attributes for GraphQL. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// Attributes for the Heroku platform. -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions.
It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. 
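A sketch of the Heroku helpers just defined being folded into a resource; the HEROKU_APP_ID, HEROKU_SLUG_COMMIT, and HEROKU_RELEASE_CREATED_AT environment variables are assumptions here (they come from Heroku's dyno metadata feature), as is the semconv import version.

    package example

    import (
        "os"

        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
    )

    func herokuResource() *resource.Resource {
        return resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.HerokuAppID(os.Getenv("HEROKU_APP_ID")),
            semconv.HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),
            semconv.HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
        )
    }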
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1', 'r1p1' - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the VM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces.
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. 
-func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the VM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of - // the HTTP connection in the HTTP connection pool.
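With all host.* helpers now in view, a small self-describing resource sketch; the IP literals repeat the documented examples, and the semconv import version is assumed.

    package example

    import (
        "os"
        "runtime"

        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
    )

    func hostResource() *resource.Resource {
        name, _ := os.Hostname()
        return resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.HostName(name),
            // GOARCH values such as "amd64" and "arm64" happen to match the enum
            // above; a production detector would map the remaining cases explicitly.
            semconv.HostArchKey.String(runtime.GOARCH),
            semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"), // variadic slice helper
        )
    }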
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'active', 'idle' - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the HTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g.
redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the - // "http.request.size" semantic conventions. It represents the total size - // of the request in bytes. This should be the total number of bytes sent - // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 - // and HTTP/3), headers, and request body if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size - // of the response in bytes. This should be the total number of bytes sent - // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and - // HTTP/3), headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1437 - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
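The `_OTHER` rule described in the note above is easy to get wrong, so a small sketch may help; the helper name methodAttrs and the hard-coded known-method set are illustrative assumptions (the enum values it uses are defined just below).

    package example

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // assumed version
    )

    // methodAttrs applies the rule from the note above: known methods pass
    // through verbatim; anything else becomes `_OTHER`, with the original
    // spelling preserved in http.request.method_original.
    func methodAttrs(m string) []attribute.KeyValue {
        switch m {
        case "CONNECT", "DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT", "TRACE":
            return []attribute.KeyValue{semconv.HTTPRequestMethodKey.String(m)}
        default:
            return []attribute.KeyValue{
                semconv.HTTPRequestMethodOther, // "_OTHER"
                semconv.HTTPRequestMethodOriginal(m),
            }
        }
    }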
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // active state - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of -// the request in bytes. This should be the total number of bytes sent over the -// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. 
It represents the total size of -// the response in bytes. This should be the total number of bytes sent over -// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Java Virtual machine related attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") - - // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" - // semantic conventions. It represents the name of the garbage collector - // action. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'end of minor GC', 'end of major GC' - // Note: Garbage collector action is generally obtained via - // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). - JvmGcActionKey = attribute.Key("jvm.gc.action") - - // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" - // semantic conventions. It represents the name of the garbage collector. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Young Generation', 'G1 Old Generation' - // Note: Garbage collector name is generally obtained via - // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). - JvmGcNameKey = attribute.Key("jvm.gc.name") - - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. 
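The JVM attribute helpers in this group follow the same constructor pattern; below is a sketch of how they might decorate a jvm.memory.used instrument (instrument wiring and values are illustrative, no SDK is configured so the recording is a no-op, and the semconv import path is an assumption):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	meter := otel.Meter("example")
	// jvm.memory.used is the conventional instrument these attributes
	// decorate; without an SDK this Add is a no-op.
	used, err := meter.Int64UpDownCounter("jvm.memory.used",
		metric.WithUnit("By"))
	if err != nil {
		panic(err)
	}
	used.Add(context.Background(), 4<<20, metric.WithAttributes(
		semconv.JvmMemoryTypeHeap,                  // jvm.memory.type
		semconv.JvmMemoryPoolName("G1 Eden space"), // jvm.memory.pool.name
	))
}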
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") - - // JvmThreadDaemonKey is the attribute Key conforming to the - // "jvm.thread.daemon" semantic conventions. It represents whether the - // thread is a daemon or not. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: stable - JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") - - // JvmThreadStateKey is the attribute Key conforming to the - // "jvm.thread.state" semantic conventions. It represents the state of the - // thread. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'runnable', 'blocked' - JvmThreadStateKey = attribute.Key("jvm.thread.state") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -var ( - // A thread that has not yet started is in this state - JvmThreadStateNew = JvmThreadStateKey.String("new") - // A thread executing in the Java virtual machine is in this state - JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") - // A thread that is blocked waiting for a monitor lock is in this state - JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") - // A thread that is waiting indefinitely for another thread to perform a particular action is in this state - JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") - // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state - JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") - // A thread that has exited is in this state - JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// JvmGcAction returns an attribute KeyValue conforming to the -// "jvm.gc.action" semantic conventions. It represents the name of the garbage -// collector action. -func JvmGcAction(val string) attribute.KeyValue { - return JvmGcActionKey.String(val) -} - -// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" -// semantic conventions. It represents the name of the garbage collector. -func JvmGcName(val string) attribute.KeyValue { - return JvmGcNameKey.String(val) -} - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// JvmThreadDaemon returns an attribute KeyValue conforming to the -// "jvm.thread.daemon" semantic conventions. It represents whether the -// thread is a daemon or not. -func JvmThreadDaemon(val bool) attribute.KeyValue { - return JvmThreadDaemonKey.Bool(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), - // which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from the Pod specification; it must be unique within a Pod. Container - // runtime usually uses a different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key - // conforming to the "k8s.container.status.last_terminated_reason" semantic - // conventions. It represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Evicted', 'Error' - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from the Pod specification; it must be unique within a Pod. Container -// runtime usually uses a different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec.
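These Kubernetes attributes are typically folded into an SDK resource describing the workload rather than set per span. A sketch, using placeholder values borrowed from the documented examples (the semconv import path is an assumption):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	// Build a resource that identifies the cluster, namespace, pod, and
	// container this process runs in.
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
		semconv.K8SContainerRestartCount(0),
	)
	fmt.Println(res.Attributes())
}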
-func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. 
-func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Attributes for a file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. 
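A small sketch of how the log.* helpers above might be assembled by a file-tailing receiver; the values are the documented examples, the loop merely prints the pairs, and the semconv import path is an assumption:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.LogIostreamStdout,                       // log.iostream
		semconv.LogFileName("audit.log"),                // basename only
		semconv.LogFilePath("/var/log/mysql/audit.log"), // full path
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}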
-func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// The generic attributes that may be used in any Log Record. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means that two - // distinguishable log records MUST have different values. - // The id MAY be a [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions.
It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such a notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to - // the "messaging.destination.partition.id" semantic conventions. It - // represents the identifier of the partition messages are sent to or - // received from, unique within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1' - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions.
It - // represents the name of the original destination the message was - // published to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such a notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to either the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to either the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack', 'nack', 'send' - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents the messaging - // system as identified by the client instrumentation.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate - // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on - // the instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are delivered to or processed by a consumer - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") - // One or more messages are settled - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). 
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming -// to the "messaging.destination.partition.id" semantic conventions. It -// represents the identifier of the partition messages are sent to or received -// from, unique within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. It represents the -// low cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to. -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes.
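Pulling several of these messaging helpers together, a sketch of a Kafka publish span; the topic, client id, sizes, and key are invented, the global no-op tracer is assumed, and the Kafka-specific helper used here is the one defined just below in this file:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("example")
	_, span := tracer.Start(context.Background(), "publish MyTopic",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,          // messaging.system
			semconv.MessagingOperationTypePublish, // messaging.operation.type
			semconv.MessagingDestinationName("MyTopic"),
			semconv.MessagingClientID("client-5"),
			semconv.MessagingMessageBodySize(1439),
			semconv.MessagingKafkaMessageKey("myKey"),
		))
	span.End()
}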
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// This group describes attributes specific to Apache Kafka. -const ( - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka, which are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, its string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") -) - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka, which are used for grouping alike messages to ensure they're -// processed on the same partition.
They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. -func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// This group describes attributes specific to RabbitMQ. -const ( - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the RabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming - // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. - // It represents the RabbitMQ message delivery tag. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 123 - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") -) - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the RabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic -// conventions. It represents the RabbitMQ message delivery tag. -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// This group describes attributes specific to RocketMQ. -const ( - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans.
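For the RabbitMQ pair above, a sketch of the attribute set an instrumentation might build; the values are the documented examples and the semconv import path is an assumption:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemRabbitmq,
		semconv.MessagingRabbitmqDestinationRoutingKey("myKey"),
		semconv.MessagingRabbitmqMessageDeliveryTag(123),
	}
	// Encode the set in the SDK's default key=value form for display.
	fmt.Println(attribute.NewSet(attrs...).Encoded(attribute.DefaultEncoder()))
}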
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for a delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to the consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the message group, which is essential for FIFO messages. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of the message, another way to mark a message besides the message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of the message besides the topic. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources; resources in different namespaces are - // independent of each other.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for a delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to the consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark a message besides the message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions.
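A sketch combining the RocketMQ helpers from this group; the values are the documented examples, a message group accompanies the FIFO message type as the comments above require, and the semconv import path is an assumption:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemRocketmq,
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,                // message type
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"), // needed for FIFO
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}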
It represents the -// namespace of RocketMQ resources; resources in different namespaces are -// independent of each other. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// This group describes attributes specific to GCP Pub/Sub. -const ( - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. - // It represents the ack deadline in seconds set for the modify ack - // deadline request. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It - // represents the ack id for a given message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ack_id' - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key - // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" - // semantic conventions. It represents the delivery attempt for a given - // message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") -) - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic -// conventions. It represents the ack deadline in seconds set for the modify -// ack deadline request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It -// represents the ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key.
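Similarly for GCP Pub/Sub, a sketch of the attributes a receive path might record; the values are the documented examples and the semconv import path is an assumption:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemGCPPubsub,
		semconv.MessagingGCPPubsubMessageAckID("ack_id"),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(2),
		semconv.MessagingGCPPubsubMessageOrderingKey("ordering_key"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}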
-// This group describes attributes specific to Azure Service Bus.
-const (
-	// MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
-	// conforming to the "messaging.servicebus.destination.subscription_name"
-	// semantic conventions. It represents the name of the subscription in the
-	// topic that messages are received from.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'mySubscription'
-	MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
-	// MessagingServicebusDispositionStatusKey is the attribute Key conforming
-	// to the "messaging.servicebus.disposition_status" semantic conventions.
-	// It describes the [settlement
-	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
-	// MessagingServicebusMessageDeliveryCountKey is the attribute Key
-	// conforming to the "messaging.servicebus.message.delivery_count" semantic
-	// conventions. It represents the number of deliveries that have been
-	// attempted for this message.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 2
-	MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
-	// MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
-	// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-	// conventions. It represents the UTC epoch seconds at which the message
-	// has been accepted and stored in the entity.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1701393730
-	MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
-	// Message is completed
-	MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
-	// Message is abandoned
-	MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
-	// Message is sent to dead letter queue
-	MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
-	// Message is deferred
-	MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic that messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
-	return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
-	return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
-	return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
-
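The disposition-status enum pairs naturally with the numeric helpers; a receiver settling a message might record, for instance (hedged sketch, `span` obtained as in the first example, values illustrative):

span.SetAttributes(
	semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
	semconv.MessagingServicebusMessageDeliveryCount(2),
	semconv.MessagingServicebusMessageEnqueuedTime(1701393730), // UTC epoch seconds
	semconv.MessagingServicebusDispositionStatusComplete,
)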
-// This group describes attributes specific to Azure Event Hubs.
-const (
-	// MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
-	// the "messaging.eventhubs.consumer.group" semantic conventions. It
-	// represents the name of the consumer group the event consumer is
-	// associated with.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'indexer'
-	MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
-	// MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
-	// to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
-	// It represents the UTC epoch seconds at which the message has been
-	// accepted and stored in the entity.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1701393730
-	MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
-	return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
-	return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
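The two Event Hubs helpers follow the same pattern; a processor span might record both (same assumptions as the earlier sketches, illustrative values):

span.SetAttributes(
	semconv.MessagingEventhubsConsumerGroup("indexer"),
	semconv.MessagingEventhubsMessageEnqueuedTime(1701393730),
)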
-// These attributes may be used for any network related operation.
-const (
-	// NetworkCarrierIccKey is the attribute Key conforming to the
-	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-	// alpha-2 2-character country code associated with the mobile carrier
-	// network.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'DE'
-	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
-	// NetworkCarrierMccKey is the attribute Key conforming to the
-	// "network.carrier.mcc" semantic conventions. It represents the mobile
-	// carrier country code.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '310'
-	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
-	// NetworkCarrierMncKey is the attribute Key conforming to the
-	// "network.carrier.mnc" semantic conventions. It represents the mobile
-	// carrier network code.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '001'
-	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
-	// NetworkCarrierNameKey is the attribute Key conforming to the
-	// "network.carrier.name" semantic conventions. It represents the name of
-	// the mobile carrier.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'sprint'
-	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
-	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It describes more
-	// details regarding the connection.type. It may be the type of cell
-	// technology connection, but it could be used for describing details about
-	// a wifi connection.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'LTE'
-	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
-	// NetworkConnectionTypeKey is the attribute Key conforming to the
-	// "network.connection.type" semantic conventions. It represents the
-	// internet connection type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'wifi'
-	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
-	// NetworkIoDirectionKey is the attribute Key conforming to the
-	// "network.io.direction" semantic conventions. It represents the network
-	// IO operation direction.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'transmit'
-	NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
-	// NetworkLocalAddressKey is the attribute Key conforming to the
-	// "network.local.address" semantic conventions. It represents the local
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkLocalAddressKey = attribute.Key("network.local.address")
-
-	// NetworkLocalPortKey is the attribute Key conforming to the
-	// "network.local.port" semantic conventions. It represents the local port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkLocalPortKey = attribute.Key("network.local.port")
-
-	// NetworkPeerAddressKey is the attribute Key conforming to the
-	// "network.peer.address" semantic conventions. It represents the peer
-	// address of the network connection - IP address or Unix domain socket
-	// name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '10.1.2.80', '/tmp/my.sock'
-	NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
-	// NetworkPeerPortKey is the attribute Key conforming to the
-	// "network.peer.port" semantic conventions. It represents the peer port
-	// number of the network connection.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 65123
-	NetworkPeerPortKey = attribute.Key("network.peer.port")
-
-	// NetworkProtocolNameKey is the attribute Key conforming to the
-	// "network.protocol.name" semantic conventions. It represents the [OSI
-	// application layer](https://osi-model.com/application-layer/) or non-OSI
-	// equivalent.
- // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // actual version of the protocol used for network communication. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '1.1', '2' - // Note: If protocol version is subject to negotiation (for example using - // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute - // SHOULD be set to the negotiated version. If the actual protocol version - // is not known, this attribute SHOULD NOT be set. - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue {
-	return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
-	return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
-	return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
-	return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
-	return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
-	return NetworkProtocolVersionKey.String(val)
-}
-
-// An OCI image manifest.
-const (
-	// OciManifestDigestKey is the attribute Key conforming to the
-	// "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, this is the
-	// digest by which the container image is known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
-	// Note: Follows [OCI Image Manifest
-	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
-	// and specifically the [Digest
-	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
-	// An example can be found in [Example Image
-	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
-	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
-	return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
-	// OpentracingRefTypeKey is the attribute Key conforming to the
-	// "opentracing.ref_type" semantic conventions. It represents the
-	// parent-child Reference type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. 
It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
-	return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
-	return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
-	return OSVersionKey.String(val)
-}
-
-// Attributes reserved for OpenTelemetry
-const (
-	// OTelStatusCodeKey is the attribute Key conforming to the
-	// "otel.status_code" semantic conventions. It represents the name of the
-	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
-	// UNSET.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: stable
-	OTelStatusCodeKey = attribute.Key("otel.status_code")
-
-	// OTelStatusDescriptionKey is the attribute Key conforming to the
-	// "otel.status_description" semantic conventions. It represents the
-	// description of the Status if it has a value, otherwise not set.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'resource not found'
-	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
-	// The operation has been validated by an Application developer or Operator to have completed successfully
-	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
-	// The operation contains an error
-	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
-	return OTelStatusDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
-	// OTelScopeNameKey is the attribute Key conforming to the
-	// "otel.scope.name" semantic conventions. It represents the name of the
-	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: 'io.opentelemetry.contrib.mongodb'
-	OTelScopeNameKey = attribute.Key("otel.scope.name")
-
-	// OTelScopeVersionKey is the attribute Key conforming to the
-	// "otel.scope.version" semantic conventions. It represents the version of
-	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: stable
-	// Examples: '1.0.0'
-	OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
-	return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
-	return OTelScopeVersionKey.String(val)
-}
-
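Unlike the span attributes shown earlier, the os.* helpers a little further up are resource attributes and usually go on the SDK resource at setup time. A hedged sketch (each semconv package exports a SchemaURL constant, though the exact version vendored here may differ; the OS values are illustrative):

import (
	"go.opentelemetry.io/otel/sdk/resource"
)

func osResource() *resource.Resource {
	// Describe the host OS once, when the SDK is configured.
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.OSTypeLinux, // ready-made enum constant
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
	)
}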
-// Operations that access some remote service.
-const (
-	// PeerServiceKey is the attribute Key conforming to the "peer.service"
-	// semantic conventions. It represents the
-	// [`service.name`](/docs/resource/README.md#service) of the remote
-	// service. SHOULD be equal to the actual `service.name` resource attribute
-	// of the remote service if any.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AuthTokenCache'
-	PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
-	return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
-	// ProcessCommandKey is the attribute Key conforming to the
-	// "process.command" semantic conventions. It represents the command used
-	// to launch the process (i.e. the command name). On Linux based systems,
-	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
-	// be set to the first parameter extracted from `GetCommandLineW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol'
-	ProcessCommandKey = attribute.Key("process.command")
-
-	// ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
-	// command arguments (including the command/executable itself) as received
-	// by the process. On Linux-based systems (and some other Unixoid systems
-	// supporting procfs), can be set according to the list of null-delimited
-	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-	// this would be the full argv vector passed to `main`.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otecol', '--config=config.yaml'
-	ProcessCommandArgsKey = attribute.Key("process.command_args")
-
-	// ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string representing the
-	// full command. On Windows, can be set to the result of `GetCommandLineW`.
-	// Do not set this if you have to assemble it just for monitoring; use
-	// `process.command_args` instead.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
-	ProcessCommandLineKey = attribute.Key("process.command_line")
-
-	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
-	// "process.context_switch_type" semantic conventions. It specifies whether
-	// the context switches for this data point were voluntary or involuntary.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
-	// ProcessCreationTimeKey is the attribute Key conforming to the
-	// "process.creation.time" semantic conventions. It represents the date and
-	// time the process was created, in ISO 8601 format.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2023-11-21T09:25:34.853Z'
-	ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
-	// ProcessExecutableNameKey is the attribute Key conforming to the
-	// "process.executable.name" semantic conventions. It represents the name
-	// of the process executable. On Linux based systems, can be set to the
-	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
-	// of `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcol'
-	ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
-	// ProcessExecutablePathKey is the attribute Key conforming to the
-	// "process.executable.path" semantic conventions. It represents the full
-	// path to the process executable. On Linux based systems, can be set to
-	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
-	// `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/usr/bin/cmd/otelcol'
-	ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
-	// ProcessExitCodeKey is the attribute Key conforming to the
-	// "process.exit.code" semantic conventions. It represents the exit code of
-	// the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 127
-	ProcessExitCodeKey = attribute.Key("process.exit.code")
-
-	// ProcessExitTimeKey is the attribute Key conforming to the
-	// "process.exit.time" semantic conventions. It represents the date and
-	// time the process exited, in ISO 8601 format.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2023-11-21T09:26:12.315Z'
-	ProcessExitTimeKey = attribute.Key("process.exit.time")
-
-	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
-	// "process.group_leader.pid" semantic conventions. It represents the PID
-	// of the process's group leader. This is also the process group ID (PGID)
-	// of the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 23
-	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
-	// ProcessInteractiveKey is the attribute Key conforming to the
-	// "process.interactive" semantic conventions. It represents whether the
-	// process is connected to an interactive shell.
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	ProcessInteractiveKey = attribute.Key("process.interactive")
-
-	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
-	// semantic conventions. It represents the username of the user that owns
-	// the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessOwnerKey = attribute.Key("process.owner")
-
-	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
-	// "process.paging.fault_type" semantic conventions.
It represents the type - // of page fault for this data point. Type `major` is for major/hard page - // faults, and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user - // ID (RUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the - // username of the real user of the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'operator' - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved - // user ID (SUID) of the process. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the - // username of the saved user. 
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'operator'
-	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
-	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
-	// "process.session_leader.pid" semantic conventions. It represents the PID
-	// of the process's session leader. This is also the session ID (SID) of
-	// the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 14
-	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
-	// ProcessUserIDKey is the attribute Key conforming to the
-	// "process.user.id" semantic conventions. It represents the effective user
-	// ID (EUID) of the process.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1001
-	ProcessUserIDKey = attribute.Key("process.user.id")
-
-	// ProcessUserNameKey is the attribute Key conforming to the
-	// "process.user.name" semantic conventions. It represents the username of
-	// the effective user of the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessUserNameKey = attribute.Key("process.user.name")
-
-	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
-	// semantic conventions. It represents the virtual process identifier.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12
-	// Note: The process ID within a PID namespace. This is not necessarily
-	// unique across all processes on the host but it is unique within the
-	// process namespace that the process exists within.
-	ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
-	// voluntary
-	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
-	// involuntary
-	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
-	// major
-	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
-	// minor
-	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
-	return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
-	return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`.
Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
-	return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
-	return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
-	return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
-	return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
-	return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
-	return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
-	return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
-	return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
-	return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
-	return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
-	return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
-	return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
-	return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
-	return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
-	return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
-	return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
-	return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
-	return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
-	return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
-	return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
-	return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
-	return ProcessVpidKey.Int(val)
-}
-
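Like the os.* group, the process.* helpers are typically attached to the SDK resource at startup. A hedged sketch under the same import assumptions as the earlier examples (the runtime values are what a Go service would plausibly report):

import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/sdk/resource"
)

func processResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()), // e.g. "go1.22.0"
	)
}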
-// Attributes for process CPU
-const (
-	// ProcessCPUStateKey is the attribute Key conforming to the
-	// "process.cpu.state" semantic conventions. It represents the CPU state of
-	// the process.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
-	// system
-	ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
-	// user
-	ProcessCPUStateUser = ProcessCPUStateKey.String("user")
-	// wait
-	ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
-
-// Attributes for remote procedure calls.
-const (
-	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
-	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
-	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
-	// Connect request. Error codes are always string values.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
-	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
-	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
-	// status
-	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
-	// the gRPC request.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
-	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-	// `error.code` property of response if it is an error response.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: -32700, 100
-	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
-	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-	// `error.message` property of response if it is an error response.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Parse error', 'User already exists'
-	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
-	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-	// property of request or response. Since protocol allows id to be int,
-	// string, `null` or missing (for notifications), value is expected to be
-	// cast to string for simplicity. Use empty string in case of `null` value.
-	// Omit entirely if this is a notification.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '10', 'request-7', ''
-	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
-	// RPCJsonrpcVersionKey is the attribute Key conforming to the
-	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-	// doesn't specify this, the value can be omitted.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2.0', '1.0'
-	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
-	// RPCMessageCompressedSizeKey is the attribute Key conforming to the
-	// "rpc.message.compressed_size" semantic conventions. It represents the
-	// compressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
-	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
-	// semantic conventions. It MUST be calculated as two different counters
-	// starting from `1`, one for sent messages and one for received messages.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This way we guarantee that the values will be consistent between
-	// different implementations.
-	RPCMessageIDKey = attribute.Key("rpc.message.id")
-
-	// RPCMessageTypeKey is the attribute Key conforming to the
-	// "rpc.message.type" semantic conventions. It represents whether this is a
-	// received or sent message.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
-	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
-	// "rpc.message.uncompressed_size" semantic conventions. It represents the
-	// uncompressed size of the message in bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
-	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
-	// semantic conventions. It represents the name of the (logical) method
-	// being called, must be equal to the $method part in the span name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'exampleMethod'
-	// Note: This is the logical name of the method from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// method/function. The `code.function` attribute may be used to store the
-	// latter (e.g., method actually executing the call on the server side, RPC
-	// client stub method on the client side).
-	RPCMethodKey = attribute.Key("rpc.method")
-
-	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
-	// semantic conventions. It represents the full (logical) name of the
-	// service being called, including its package name, if applicable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'myservice.EchoService'
-	// Note: This is the logical name of the service from the RPC interface
-	// perspective, which can be different from the name of any implementing
-	// class. The `code.namespace` attribute may be used to store the latter
-	// (despite the attribute name, it may include a class name; e.g., class
-	// with method actually executing the call on the server side, RPC client
-	// stub class on the client side).
-	RPCServiceKey = attribute.Key("rpc.service")
-
-	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
-	// semantic conventions. It represents a string identifying the remoting
-	// system. See below for a list of well-known identifiers.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // sent - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of the response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of the response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of the request or response. Since the protocol allows id to be int, string, -// `null`, or missing (for notifications), the value is expected to be cast to a -// string for simplicity. Use an empty string in case of a `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the -// "rpc.message.id" semantic conventions. It represents the message ID, which MUST be calculated -// as two different counters starting from `1`, one for sent messages and one -// for received messages. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to -// the "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). 
This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to - // distinguish instances of the same service that exist at the same time - // (e.g. instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random - // Version 1 or Version 4 [RFC - // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an - // inherent unique ID as the source of - // this value if stability is desirable. In that case, the ID SHOULD be - // used as source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the - // purposes of identifying a service instance is - // needed. 
Similar to what can be seen in the man page for the - // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) - // file, the underlying - // data, such as pod name and namespace, should be treated as confidential; - // it is the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we - // do not recommend using one identifier - // for all processes participating in the application. Instead, it's - // recommended that each division (e.g. a worker - // thread in unicorn) have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it - // can't unambiguously determine the - // service instance that is generating that telemetry. For instance, - // creating a UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container - // within that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, - // as they know the target address and - // port. - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fall back to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If - // `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). A zero-length namespace string is assumed equal to an - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. 
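// An illustrative sketch of describing a service with the resource attributes
// in this section, assuming these helpers are in scope; the values mirror the
// Examples above, and the SchemaURL constant is assumed to be the one shipped
// alongside this semconv package.
import (
	"go.opentelemetry.io/otel/sdk/resource"
)

func exampleServiceResource() *resource.Resource {
	return resource.NewWithAttributes(
		SchemaURL, // schema URL accompanying this semantic conventions release
		ServiceName("shoppingcart"),
		ServiceNamespace("Shop"),
		ServiceVersion("2.0.0"),
		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
}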
-func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches its end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // SignalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Describes System attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents the state of - // a network connection. A stateless protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process attributes -const ( - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -var ( - // running - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Attributes for telemetry SDK. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. 
- // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Required - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. 
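// An illustrative sketch of the telemetry SDK and distro attributes combined,
// assuming these helpers are in scope; the values mirror the Examples in this
// section (the distro name shown is the Java example from the docs, and
// telemetry.distro.* is only set when a distro is actually in use).
func exampleTelemetryAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		TelemetrySDKLanguageGo,            // telemetry.sdk.language
		TelemetrySDKName("opentelemetry"), // reserved for the official SDK
		TelemetrySDKVersion("1.2.3"),
		TelemetryDistroName("parts-unlimited-java"), // Example value from above
		TelemetryDistroVersion("1.2.3"),
	}
}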
-func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. 
This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of the DER-encoded version of - // the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of the DER-encoded version of - // the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of the DER-encoded version - // of the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/time - // indicating when the client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. 
It represents the - // date/time indicating when the client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the server - // name indication (SNI) sent by the client: the hostname to which the - // client is attempting to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. 
It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of the DER-encoded version of - // the certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of the DER-encoded version of - // the certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of the DER-encoded version - // of the certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/time - // indicating when the server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/time indicating when the server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. 
It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of the DER-encoded version of the certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of the DER-encoded version of the certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of the DER-encoded version of the certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/time -// indicating when the client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/time -// indicating when the client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the server name -// indication (SNI) sent by the client: the hostname to which the client is -// attempting to connect. 
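// An illustrative sketch of deriving the client-side TLS attributes above
// from Go's crypto/tls client hello data; the helper name and the extra
// import are assumptions, not part of this package's API.
import (
	"crypto/tls"
)

func clientHelloAttributes(hello *tls.ClientHelloInfo) []attribute.KeyValue {
	// Translate cipher suite IDs into their registered names, matching the
	// tls.client.supported_ciphers examples.
	ciphers := make([]string, 0, len(hello.CipherSuites))
	for _, id := range hello.CipherSuites {
		ciphers = append(ciphers, tls.CipherSuiteName(id))
	}
	return []attribute.KeyValue{
		TLSClientServerName(hello.ServerName), // the SNI value
		TLSClientSupportedCiphers(ciphers...),
	}
}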
-func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. 
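// An illustrative sketch of recording the outcome of a TLS handshake from
// Go's crypto/tls connection state; the helper name and the extra import are
// assumptions, not part of this package's API.
import (
	"crypto/tls"
)

func tlsConnectionAttributes(cs tls.ConnectionState) []attribute.KeyValue {
	// tls.protocol.version carries only the numeric part of the version.
	var version string
	switch cs.Version {
	case tls.VersionTLS13:
		version = "1.3"
	case tls.VersionTLS12:
		version = "1.2"
	}
	return []attribute.KeyValue{
		TLSEstablished(cs.HandshakeComplete),
		TLSResumed(cs.DidResume),
		TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
		TLSNextProtocol(cs.NegotiatedProtocol), // ALPN value, lower case per IANA
		TLSProtocolNameTLS,                     // tls.protocol.name
		TLSProtocolVersion(version),
	}
}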
-func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of the DER-encoded version of the certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of the DER-encoded version of the certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of the DER-encoded version of the certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" - // semantic conventions. It represents the domain extracted from the - // `url.full`, such as "opentelemetry.io". 
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every URL has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such a case username
- // and password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such a case password
- // and username SHOULD NOT be redacted and attribute's value SHOULD remain
- // the same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered URL domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
- // qualified domain name, which includes all of the names except the host
- // name under the registered_domain. In a partially qualified domain, or if
- // the qualification level of the full name cannot be determined, subdomain
- // contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
- // top level domain (eTLD), also known as the domain suffix, which is the
- // last part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered URL domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions.
It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name, which includes all of the names except the
-// host name under the registered_domain. In a partially qualified domain, or
-// if the qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, which is the last part
-// of the domain name. For example, the top level domain for example.com is
-// `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`.
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from original. Usually refers to the browser's
- // version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string.
In the case of using a user-agent for - // non-browser products, such as microservices with multiple names/versions - // inside the `user_agent.original`, the most significant version SHOULD be - // selected. In such a scenario it should align with `user_agent.name` - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// The attributes used to describe the packaged software running the -// application code. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. 
-func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go deleted file mode 100644 index d031bbea784..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.26.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go deleted file mode 100644 index bfaee0d56e3..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go deleted file mode 100644 index fcdb9f48596..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -const ( - - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: Experimental - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: Experimental - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. 
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It is deprecated;
- // use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It is deprecated;
- // use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It is deprecated;
- // use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It is deprecated;
- // use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It is
- // deprecated; use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It is deprecated;
- // use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It is
- // deprecated; use `db.client.connection.create_time` instead. Note: the
- // unit also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It is
- // deprecated; use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It is deprecated;
- // use `db.client.connection.use_time` instead. Note: the unit also changed
- // from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It measures the time taken to perform a DNS
- // lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
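
For orientation while reviewing, here is how the generated Name/Unit/Description constants being removed above are typically consumed with the go.opentelemetry.io/otel metric API. This is purely illustrative and not part of the vendored file or this patch; the instrumentation scope name "example/dns" and the helper function are hypothetical.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordDNSLookup records one lookup duration against the
// dns.lookup.duration histogram defined by the constants above.
func recordDNSLookup(ctx context.Context, elapsed time.Duration) error {
	meter := otel.Meter("example/dns") // hypothetical scope name
	hist, err := meter.Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		return err
	}
	hist.Record(ctx, elapsed.Seconds()) // unit is "s", so record seconds
	return nil
}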
- - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Stable - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Stable - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." - - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Stable - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Stable - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. 
It represents the
- // number of connections that are currently upgraded (WebSockets).
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It measures the duration of the function's logic
- // execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It measures the duration of the function's
- // initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
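
The updowncounter metrics above pair naturally with the attribute helper functions from earlier in this file. Again purely illustrative and not part of the patch: a minimal sketch of bracketing an outbound request with http.client.active_requests, assuming the same otel metric API; the scope name, URL value, and wrapper function are made up.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// trackRequest increments the http.client.active_requests updowncounter
// before the request runs and decrements it afterwards, tagging both
// measurements with URL attributes built via the helpers defined above.
func trackRequest(ctx context.Context, do func(context.Context) error) error {
	meter := otel.Meter("example/httpclient") // hypothetical scope name
	active, err := meter.Int64UpDownCounter(
		semconv.HTTPClientActiveRequestsName,
		metric.WithUnit(semconv.HTTPClientActiveRequestsUnit),
		metric.WithDescription(semconv.HTTPClientActiveRequestsDescription),
	)
	if err != nil {
		return err
	}
	attrs := metric.WithAttributes(
		semconv.URLFull("https://www.example.com/search?q=OpenTelemetry"),
		semconv.URLScheme("https"),
	)
	active.Add(ctx, 1, attrs)
	defer active.Add(ctx, -1, attrs)
	return do(ctx)
}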
- - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. 
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the CPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It measures the
- // duration of the publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It measures the
- // duration of the receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It measures the
- // duration of the process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It measures the
- // number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It measures the
- // number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It measures the
- // number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions.
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process thread count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It measures the duration of inbound RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It measures the size of RPC
- // request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It measures the size of RPC
- // response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It measures the number
- // of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It measures the number
- // of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It measures the duration of outbound RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It measures the size of RPC
- // request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It measures the size of RPC
- // response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It measures the number
- // of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It measures the number
- // of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It reports the current frequency of the CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It reports the number of
- // actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It reports the number of
- // logical (virtual) processor cores created by the operating system to manage
- // multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the Unix swap or Windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go deleted file mode 100644 index 4c87c7adcc7..00000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. 
Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
index 666bded4baf..267979c051d 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -4,28 +4,53 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
 
 import (
-	"fmt"
 	"reflect"
 
 	"go.opentelemetry.io/otel/attribute"
 )
 
 // ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+//
+// If err is nil, the returned attribute has the default value
+// [ErrorTypeOther].
+//
+// If err's type has the method
+//
+//	ErrorType() string
+//
+// then the returned attribute has the value of err.ErrorType(). Otherwise, the
+// returned attribute has a value derived from the concrete type of err.
+//
+// The key of the returned attribute is [ErrorTypeKey].
 func ErrorType(err error) attribute.KeyValue {
 	if err == nil {
 		return ErrorTypeOther
 	}
-	t := reflect.TypeOf(err)
-	var value string
-	if t.PkgPath() == "" && t.Name() == "" {
-		// Likely a builtin type.
-		value = t.String()
-	} else {
-		value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+
+	return ErrorTypeKey.String(errorType(err))
+}
+
+func errorType(err error) string {
+	var s string
+	if et, ok := err.(interface{ ErrorType() string }); ok {
+		// Prioritize the ErrorType method if available.
+		s = et.ErrorType()
 	}
+	if s == "" {
+		// Fallback to reflection if the ErrorType method is not supported or
+		// returns an empty value.
-	if value == "" {
-		return ErrorTypeOther
+		t := reflect.TypeOf(err)
+		pkg, name := t.PkgPath(), t.Name()
+		if pkg != "" && name != "" {
+			s = pkg + "." + name
+		} else {
+			// The type has no package path or name (predeclared, not-defined,
+			// or alias for a not-defined type).
+			//
+			// This is not guaranteed to be unique, but is a best effort.
+			s = t.String()
+		}
 	}
-	return ErrorTypeKey.String(value)
+	return s
 }
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/goconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/goconv/metric.go
index fe9e2933e17..ba37763b934 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/goconv/metric.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/goconv/metric.go
@@ -3,7 +3,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-// Package httpconv provides types and functionality for OpenTelemetry semantic
+// Package goconv provides types and functionality for OpenTelemetry semantic
 // conventions in the "go" namespace.
 package goconv
 
@@ -41,6 +41,11 @@ type ConfigGogc struct {
 	metric.Int64ObservableUpDownCounter
 }
 
+var newConfigGogcOpts = []metric.Int64ObservableUpDownCounterOption{
+	metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."),
+	metric.WithUnit("%"),
+}
+
 // NewConfigGogc returns a new ConfigGogc instrument.
 func NewConfigGogc(
 	m metric.Meter,
@@ -51,15 +56,18 @@ func NewConfigGogc(
 		return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, nil
 	}
 
+	if len(opt) == 0 {
+		opt = newConfigGogcOpts
+	} else {
+		opt = append(opt, newConfigGogcOpts...)
+	}
+
 	i, err := m.Int64ObservableUpDownCounter(
 		"go.config.gogc",
-		append([]metric.Int64ObservableUpDownCounterOption{
-			metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."),
-			metric.WithUnit("%"),
-		}, opt...)...,
+		opt...,
 	)
 	if err != nil {
-		return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err
+		return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err
 	}
 	return ConfigGogc{i}, nil
 }
@@ -91,6 +99,11 @@ type GoroutineCount struct {
 	metric.Int64ObservableUpDownCounter
 }
 
+var newGoroutineCountOpts = []metric.Int64ObservableUpDownCounterOption{
+	metric.WithDescription("Count of live goroutines."),
+	metric.WithUnit("{goroutine}"),
+}
+
 // NewGoroutineCount returns a new GoroutineCount instrument.
 func NewGoroutineCount(
 	m metric.Meter,
@@ -101,15 +114,18 @@ func NewGoroutineCount(
 		return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, nil
 	}
 
+	if len(opt) == 0 {
+		opt = newGoroutineCountOpts
+	} else {
+		opt = append(opt, newGoroutineCountOpts...)
+	}
+
 	i, err := m.Int64ObservableUpDownCounter(
 		"go.goroutine.count",
-		append([]metric.Int64ObservableUpDownCounterOption{
-			metric.WithDescription("Count of live goroutines."),
-			metric.WithUnit("{goroutine}"),
-		}, opt...)...,
+		opt...,
 	)
 	if err != nil {
-		return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err
+		return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err
 	}
 	return GoroutineCount{i}, nil
 }
@@ -141,6 +157,11 @@ type MemoryAllocated struct {
 	metric.Int64ObservableCounter
 }
 
+var newMemoryAllocatedOpts = []metric.Int64ObservableCounterOption{
+	metric.WithDescription("Memory allocated to the heap by the application."),
+	metric.WithUnit("By"),
+}
+
 // NewMemoryAllocated returns a new MemoryAllocated instrument.
 func NewMemoryAllocated(
 	m metric.Meter,
@@ -151,15 +172,18 @@ func NewMemoryAllocated(
 		return MemoryAllocated{noop.Int64ObservableCounter{}}, nil
 	}
 
+	if len(opt) == 0 {
+		opt = newMemoryAllocatedOpts
+	} else {
+		opt = append(opt, newMemoryAllocatedOpts...)
+	}
+
 	i, err := m.Int64ObservableCounter(
 		"go.memory.allocated",
-		append([]metric.Int64ObservableCounterOption{
-			metric.WithDescription("Memory allocated to the heap by the application."),
-			metric.WithUnit("By"),
-		}, opt...)...,
+		opt...,
 	)
 	if err != nil {
-		return MemoryAllocated{noop.Int64ObservableCounter{}}, err
+		return MemoryAllocated{noop.Int64ObservableCounter{}}, err
 	}
 	return MemoryAllocated{i}, nil
 }
@@ -191,6 +215,11 @@ type MemoryAllocations struct {
 	metric.Int64ObservableCounter
 }
 
+var newMemoryAllocationsOpts = []metric.Int64ObservableCounterOption{
+	metric.WithDescription("Count of allocations to the heap by the application."),
+	metric.WithUnit("{allocation}"),
+}
+
 // NewMemoryAllocations returns a new MemoryAllocations instrument.
 func NewMemoryAllocations(
 	m metric.Meter,
@@ -201,15 +230,18 @@ func NewMemoryAllocations(
 		return MemoryAllocations{noop.Int64ObservableCounter{}}, nil
 	}
 
+	if len(opt) == 0 {
+		opt = newMemoryAllocationsOpts
+	} else {
+		opt = append(opt, newMemoryAllocationsOpts...)
+ } + i, err := m.Int64ObservableCounter( "go.memory.allocations", - append([]metric.Int64ObservableCounterOption{ - metric.WithDescription("Count of allocations to the heap by the application."), - metric.WithUnit("{allocation}"), - }, opt...)..., + opt..., ) if err != nil { - return MemoryAllocations{noop.Int64ObservableCounter{}}, err + return MemoryAllocations{noop.Int64ObservableCounter{}}, err } return MemoryAllocations{i}, nil } @@ -241,6 +273,11 @@ type MemoryGCGoal struct { metric.Int64ObservableUpDownCounter } +var newMemoryGCGoalOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Heap size target for the end of the GC cycle."), + metric.WithUnit("By"), +} + // NewMemoryGCGoal returns a new MemoryGCGoal instrument. func NewMemoryGCGoal( m metric.Meter, @@ -251,15 +288,18 @@ func NewMemoryGCGoal( return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newMemoryGCGoalOpts + } else { + opt = append(opt, newMemoryGCGoalOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "go.memory.gc.goal", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("Heap size target for the end of the GC cycle."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err + return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err } return MemoryGCGoal{i}, nil } @@ -291,6 +331,11 @@ type MemoryLimit struct { metric.Int64ObservableUpDownCounter } +var newMemoryLimitOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."), + metric.WithUnit("By"), +} + // NewMemoryLimit returns a new MemoryLimit instrument. func NewMemoryLimit( m metric.Meter, @@ -301,15 +346,18 @@ func NewMemoryLimit( return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newMemoryLimitOpts + } else { + opt = append(opt, newMemoryLimitOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "go.memory.limit", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err + return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err } return MemoryLimit{i}, nil } @@ -341,6 +389,11 @@ type MemoryUsed struct { metric.Int64ObservableUpDownCounter } +var newMemoryUsedOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Memory used by the Go runtime."), + metric.WithUnit("By"), +} + // NewMemoryUsed returns a new MemoryUsed instrument. func NewMemoryUsed( m metric.Meter, @@ -351,15 +404,18 @@ func NewMemoryUsed( return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newMemoryUsedOpts + } else { + opt = append(opt, newMemoryUsedOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "go.memory.used", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("Memory used by the Go runtime."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err + return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err } return MemoryUsed{i}, nil } @@ -397,6 +453,11 @@ type ProcessorLimit struct { metric.Int64ObservableUpDownCounter } +var newProcessorLimitOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."), + metric.WithUnit("{thread}"), +} + // NewProcessorLimit returns a new ProcessorLimit instrument. func NewProcessorLimit( m metric.Meter, @@ -407,15 +468,18 @@ func NewProcessorLimit( return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newProcessorLimitOpts + } else { + opt = append(opt, newProcessorLimitOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "go.processor.limit", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."), - metric.WithUnit("{thread}"), - }, opt...)..., + opt..., ) if err != nil { - return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err + return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err } return ProcessorLimit{i}, nil } @@ -448,6 +512,11 @@ type ScheduleDuration struct { metric.Float64Histogram } +var newScheduleDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."), + metric.WithUnit("s"), +} + // NewScheduleDuration returns a new ScheduleDuration instrument. func NewScheduleDuration( m metric.Meter, @@ -458,15 +527,18 @@ func NewScheduleDuration( return ScheduleDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newScheduleDurationOpts + } else { + opt = append(opt, newScheduleDurationOpts...) + } + i, err := m.Float64Histogram( "go.schedule.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ScheduleDuration{noop.Float64Histogram{}}, err + return ScheduleDuration{noop.Float64Histogram{}}, err } return ScheduleDuration{i}, nil } @@ -518,6 +590,7 @@ func (m ScheduleDuration) Record(ctx context.Context, val float64, attrs ...attr func (m ScheduleDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -528,4 +601,4 @@ func (m ScheduleDuration) RecordSet(ctx context.Context, val float64, set attrib *o = append(*o, metric.WithAttributeSet(set)) m.Float64Histogram.Record(ctx, val, *o...) 
-} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go index 55bde895ddd..a0ddf652d34 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -91,6 +91,11 @@ type ClientActiveRequests struct { metric.Int64UpDownCounter } +var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), +} + // NewClientActiveRequests returns a new ClientActiveRequests instrument. func NewClientActiveRequests( m metric.Meter, @@ -101,15 +106,18 @@ func NewClientActiveRequests( return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientActiveRequestsOpts + } else { + opt = append(opt, newClientActiveRequestsOpts...) + } + i, err := m.Int64UpDownCounter( "http.client.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err } return ClientActiveRequests{i}, nil } @@ -223,6 +231,11 @@ type ClientConnectionDuration struct { metric.Float64Histogram } +var newClientConnectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), +} + // NewClientConnectionDuration returns a new ClientConnectionDuration instrument. func NewClientConnectionDuration( m metric.Meter, @@ -233,15 +246,18 @@ func NewClientConnectionDuration( return ClientConnectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientConnectionDurationOpts + } else { + opt = append(opt, newClientConnectionDurationOpts...) + } + i, err := m.Float64Histogram( "http.client.connection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the successfully established outbound HTTP connections."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientConnectionDuration{noop.Float64Histogram{}}, err + return ClientConnectionDuration{noop.Float64Histogram{}}, err } return ClientConnectionDuration{i}, nil } @@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record( func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -353,6 +370,11 @@ type ClientOpenConnections struct { metric.Int64UpDownCounter } +var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), +} + // NewClientOpenConnections returns a new ClientOpenConnections instrument. func NewClientOpenConnections( m metric.Meter, @@ -363,15 +385,18 @@ func NewClientOpenConnections( return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newClientOpenConnectionsOpts + } else { + opt = append(opt, newClientOpenConnectionsOpts...) 
+ } + i, err := m.Int64UpDownCounter( "http.client.open_connections", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), - metric.WithUnit("{connection}"), - }, opt...)..., + opt..., ) if err != nil { - return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err } return ClientOpenConnections{i}, nil } @@ -488,6 +513,11 @@ type ClientRequestBodySize struct { metric.Int64Histogram } +var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), +} + // NewClientRequestBodySize returns a new ClientRequestBodySize instrument. func NewClientRequestBodySize( m metric.Meter, @@ -498,15 +528,18 @@ func NewClientRequestBodySize( return ClientRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestBodySizeOpts + } else { + opt = append(opt, newClientRequestBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.client.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestBodySize{noop.Int64Histogram{}}, err + return ClientRequestBodySize{noop.Int64Histogram{}}, err } return ClientRequestBodySize{i}, nil } @@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record( func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -662,6 +696,11 @@ type ClientRequestDuration struct { metric.Float64Histogram } +var newClientRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), +} + // NewClientRequestDuration returns a new ClientRequestDuration instrument. func NewClientRequestDuration( m metric.Meter, @@ -672,15 +711,18 @@ func NewClientRequestDuration( return ClientRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientRequestDurationOpts + } else { + opt = append(opt, newClientRequestDurationOpts...) + } + i, err := m.Float64Histogram( "http.client.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP client requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ClientRequestDuration{noop.Float64Histogram{}}, err + return ClientRequestDuration{noop.Float64Histogram{}}, err } return ClientRequestDuration{i}, nil } @@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record( func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -822,6 +865,11 @@ type ClientResponseBodySize struct { metric.Int64Histogram } +var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), +} + // NewClientResponseBodySize returns a new ClientResponseBodySize instrument. 
func NewClientResponseBodySize( m metric.Meter, @@ -832,15 +880,18 @@ func NewClientResponseBodySize( return ClientResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newClientResponseBodySizeOpts + } else { + opt = append(opt, newClientResponseBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.client.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP client response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ClientResponseBodySize{noop.Int64Histogram{}}, err + return ClientResponseBodySize{noop.Int64Histogram{}}, err } return ClientResponseBodySize{i}, nil } @@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record( func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -996,6 +1048,11 @@ type ServerActiveRequests struct { metric.Int64UpDownCounter } +var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), +} + // NewServerActiveRequests returns a new ServerActiveRequests instrument. func NewServerActiveRequests( m metric.Meter, @@ -1006,15 +1063,18 @@ func NewServerActiveRequests( return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newServerActiveRequestsOpts + } else { + opt = append(opt, newServerActiveRequestsOpts...) + } + i, err := m.Int64UpDownCounter( "http.server.active_requests", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("Number of active HTTP server requests."), - metric.WithUnit("{request}"), - }, opt...)..., + opt..., ) if err != nil { - return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err } return ServerActiveRequests{i}, nil } @@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct { metric.Int64Histogram } +var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), +} + // NewServerRequestBodySize returns a new ServerRequestBodySize instrument. func NewServerRequestBodySize( m metric.Meter, @@ -1128,15 +1193,18 @@ func NewServerRequestBodySize( return ServerRequestBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestBodySizeOpts + } else { + opt = append(opt, newServerRequestBodySizeOpts...) 
+ } + i, err := m.Int64Histogram( "http.server.request.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server request bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestBodySize{noop.Int64Histogram{}}, err + return ServerRequestBodySize{noop.Int64Histogram{}}, err } return ServerRequestBodySize{i}, nil } @@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record( func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1299,6 +1368,11 @@ type ServerRequestDuration struct { metric.Float64Histogram } +var newServerRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), +} + // NewServerRequestDuration returns a new ServerRequestDuration instrument. func NewServerRequestDuration( m metric.Meter, @@ -1309,15 +1383,18 @@ func NewServerRequestDuration( return ServerRequestDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerRequestDurationOpts + } else { + opt = append(opt, newServerRequestDurationOpts...) + } + i, err := m.Float64Histogram( "http.server.request.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("Duration of HTTP server requests."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return ServerRequestDuration{noop.Float64Histogram{}}, err + return ServerRequestDuration{noop.Float64Histogram{}}, err } return ServerRequestDuration{i}, nil } @@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record( func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct { metric.Int64Histogram } +var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), +} + // NewServerResponseBodySize returns a new ServerResponseBodySize instrument. func NewServerResponseBodySize( m metric.Meter, @@ -1476,15 +1559,18 @@ func NewServerResponseBodySize( return ServerResponseBodySize{noop.Int64Histogram{}}, nil } + if len(opt) == 0 { + opt = newServerResponseBodySizeOpts + } else { + opt = append(opt, newServerResponseBodySizeOpts...) + } + i, err := m.Int64Histogram( "http.server.response.body.size", - append([]metric.Int64HistogramOption{ - metric.WithDescription("Size of HTTP server response bodies."), - metric.WithUnit("By"), - }, opt...)..., + opt..., ) if err != nil { - return ServerResponseBodySize{noop.Int64Histogram{}}, err + return ServerResponseBodySize{noop.Int64Histogram{}}, err } return ServerResponseBodySize{i}, nil } @@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record( func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { if set.Len() == 0 { m.Int64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { // the category of synthetic traffic, such as tests or bots. 
func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { return attribute.String("user_agent.synthetic.type", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go index a78eafd1fa3..fd064530c34 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -3,7 +3,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package httpconv provides types and functionality for OpenTelemetry semantic +// Package otelconv provides types and functionality for OpenTelemetry semantic // conventions in the "otel" namespace. package otelconv @@ -172,6 +172,11 @@ type SDKExporterLogExported struct { metric.Int64Counter } +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. func NewSDKExporterLogExported( m metric.Meter, @@ -182,15 +187,18 @@ func NewSDKExporterLogExported( return SDKExporterLogExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.log.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogExported{noop.Int64Counter{}}, err + return SDKExporterLogExported{noop.Int64Counter{}}, err } return SDKExporterLogExported{i}, nil } @@ -319,6 +327,11 @@ type SDKExporterLogInflight struct { metric.Int64UpDownCounter } +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + // NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. func NewSDKExporterLogInflight( m metric.Meter, @@ -329,15 +342,18 @@ func NewSDKExporterLogInflight( return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) 
+ } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.log.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterLogInflight{i}, nil } @@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct { metric.Int64Counter } +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointExported returns a new // SDKExporterMetricDataPointExported instrument. func NewSDKExporterMetricDataPointExported( @@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported( return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.metric_data_point.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err } return SDKExporterMetricDataPointExported{i}, nil } @@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct { metric.Int64UpDownCounter } +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + // NewSDKExporterMetricDataPointInflight returns a new // SDKExporterMetricDataPointInflight instrument. func NewSDKExporterMetricDataPointInflight( @@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight( return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) 
+ } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.metric_data_point.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{data_point}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterMetricDataPointInflight{i}, nil } @@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct { metric.Float64Histogram } +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + // NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration // instrument. func NewSDKExporterOperationDuration( @@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration( return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) + } + i, err := m.Float64Histogram( "otel.sdk.exporter.operation.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of exporting a batch of telemetry records."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err } return SDKExporterOperationDuration{i}, nil } @@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record( func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -893,6 +934,11 @@ type SDKExporterSpanExported struct { metric.Int64Counter } +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. func NewSDKExporterSpanExported( m metric.Meter, @@ -903,15 +949,18 @@ func NewSDKExporterSpanExported( return SDKExporterSpanExported{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.exporter.span.exported", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanExported{noop.Int64Counter{}}, err + return SDKExporterSpanExported{noop.Int64Counter{}}, err } return SDKExporterSpanExported{i}, nil } @@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct { metric.Int64UpDownCounter } +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + // NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. 
func NewSDKExporterSpanInflight( m metric.Meter, @@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight( return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.exporter.span.inflight", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err } return SDKExporterSpanInflight{i}, nil } @@ -1169,6 +1226,11 @@ type SDKLogCreated struct { metric.Int64Counter } +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + // NewSDKLogCreated returns a new SDKLogCreated instrument. func NewSDKLogCreated( m metric.Meter, @@ -1179,15 +1241,18 @@ func NewSDKLogCreated( return SDKLogCreated{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.log.created", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKLogCreated{noop.Int64Counter{}}, err + return SDKLogCreated{noop.Int64Counter{}}, err } return SDKLogCreated{i}, nil } @@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct { metric.Float64Histogram } +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + // NewSDKMetricReaderCollectionDuration returns a new // SDKMetricReaderCollectionDuration instrument. func NewSDKMetricReaderCollectionDuration( @@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration( return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil } + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) 
+ } + i, err := m.Float64Histogram( "otel.sdk.metric_reader.collection.duration", - append([]metric.Float64HistogramOption{ - metric.WithDescription("The duration of the collect operation of the metric reader."), - metric.WithUnit("s"), - }, opt...)..., + opt..., ) if err != nil { - return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err } return SDKMetricReaderCollectionDuration{i}, nil } @@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record( func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { if set.Len() == 0 { m.Float64Histogram.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct { metric.Int64Counter } +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. func NewSDKProcessorLogProcessed( m metric.Meter, @@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed( return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.processor.log.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err } return SDKProcessorLogProcessed{i}, nil } @@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity // instrument. func NewSDKProcessorLogQueueCapacity( @@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity( return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) 
+ } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueCapacity{i}, nil } @@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + // NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. func NewSDKProcessorLogQueueSize( m metric.Meter, @@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize( return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.log.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), - metric.WithUnit("{log_record}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorLogQueueSize{i}, nil } @@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct { metric.Int64Counter } +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed // instrument. func NewSDKProcessorSpanProcessed( @@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed( return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.processor.span.processed", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err } return SDKProcessorSpanProcessed{i}, nil } @@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity // instrument. 
func NewSDKProcessorSpanQueueCapacity( @@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity( return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.capacity", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueCapacity{i}, nil } @@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct { metric.Int64ObservableUpDownCounter } +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + // NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize // instrument. func NewSDKProcessorSpanQueueSize( @@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize( return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) + } + i, err := m.Int64ObservableUpDownCounter( "otel.sdk.processor.span.queue.size", - append([]metric.Int64ObservableUpDownCounterOption{ - metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err } return SDKProcessorSpanQueueSize{i}, nil } @@ -1910,6 +2032,11 @@ type SDKSpanLive struct { metric.Int64UpDownCounter } +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + // NewSDKSpanLive returns a new SDKSpanLive instrument. func NewSDKSpanLive( m metric.Meter, @@ -1920,15 +2047,18 @@ func NewSDKSpanLive( return SDKSpanLive{noop.Int64UpDownCounter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + i, err := m.Int64UpDownCounter( "otel.sdk.span.live", - append([]metric.Int64UpDownCounterOption{ - metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanLive{noop.Int64UpDownCounter{}}, err + return SDKSpanLive{noop.Int64UpDownCounter{}}, err } return SDKSpanLive{i}, nil } @@ -2013,6 +2143,11 @@ type SDKSpanStarted struct { metric.Int64Counter } +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + // NewSDKSpanStarted returns a new SDKSpanStarted instrument. 
func NewSDKSpanStarted( m metric.Meter, @@ -2023,15 +2158,18 @@ func NewSDKSpanStarted( return SDKSpanStarted{noop.Int64Counter{}}, nil } + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + i, err := m.Int64Counter( "otel.sdk.span.started", - append([]metric.Int64CounterOption{ - metric.WithDescription("The number of created spans."), - metric.WithUnit("{span}"), - }, opt...)..., + opt..., ) if err != nil { - return SDKSpanStarted{noop.Int64Counter{}}, err + return SDKSpanStarted{noop.Int64Counter{}}, err } return SDKSpanStarted{i}, nil } @@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K // value of the sampler for this span. func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { return attribute.String("otel.span.sampling_result", string(val)) -} \ No newline at end of file +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index aea11a2b52c..d9ecef1cad2 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) 
+ if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476ee12..d01e7936649 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. + // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttributes later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index bcaa5aa5378..0d5b0291873 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 07145e254b5..f4a3893eb5a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.38.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.60.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.14.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,9 +36,28 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.13 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index a7c5d19bff3..1f8d49bc983 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -34,7 +34,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// AnyValue is used to represent any type of
attribute value. AnyValue may contain a +// Represents any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. type AnyValue struct { @@ -252,8 +252,10 @@ type KeyValueList struct { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). + // // The keys MUST be unique (it is not allowed to have more than one // value with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -296,14 +298,16 @@ func (x *KeyValueList) GetValues() []*KeyValue { return nil } -// KeyValue is a key-value pair that is used to store Span attributes, Link +// Represents a key-value pair that is used to store Span attributes, Link // attributes, etc. type KeyValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The key name of the pair. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value of the pair. Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } @@ -360,14 +364,21 @@ type InstrumentationScope struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // A name denoting the Instrumentation scope. // An empty instrumentation scope name means the name is unknown. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Defines the version of the instrumentation scope. + // An empty instrumentation scope version means the version is unknown. Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` // Additional attributes that describe the scope. [Optional]. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). - Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // The behavior of software that receives duplicated keys can be unpredictable. + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // The number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (x *InstrumentationScope) Reset() { diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index ec187b13d0b..748c9fb1c52 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -375,7 +375,8 @@ type ScopeMetrics struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. 
To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all metrics in the "metrics" field. + // This schema_url applies to the data in the "scope" field and all metrics in the + // "metrics" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -521,11 +522,11 @@ type Metric struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // name of the metric. + // The name of the metric. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // description of the metric, which can be used in documentation. + // A description of the metric, which can be used in documentation. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // unit in which the metric value is reported. Follows the format + // The unit in which the metric value is reported. Follows the format // described by https://unitsofmeasure.org/ucum.html. Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` // Data determines the aggregation type (if any) of the metric, what is the @@ -546,6 +547,7 @@ type Metric struct { // for lossless roundtrip translation to / from another data model. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Metadata []*v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata,omitempty"` } @@ -699,6 +701,8 @@ type Gauge struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -748,11 +752,13 @@ type Sum struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` - // If "true" means that the sum is monotonic. + // Represents whether the sum is monotonic. IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` } @@ -816,6 +822,8 @@ type Histogram struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. 
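The new comments on the `data_points` fields above note that a single Sum (or Gauge/Histogram) message may carry several time series at once, sharing a timestamp and distinguished only by their attributes. A hedged sketch of what that means when assembling an OTLP Sum by hand with the generated `go.opentelemetry.io/proto/otlp` types; the metric name, attributes, and values are illustrative only:

```go
package example

import (
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// strAttr builds a string-valued KeyValue attribute.
func strAttr(key, val string) *commonpb.KeyValue {
	return &commonpb.KeyValue{
		Key:   key,
		Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}},
	}
}

// exampleSum builds a monotonic cumulative Sum holding two time series:
// both data points share a timestamp and differ only in their attributes.
func exampleSum(now uint64) *metricspb.Metric {
	return &metricspb.Metric{
		Name:        "http.server.request.count",
		Description: "Example request counter.",
		Unit:        "{request}",
		Data: &metricspb.Metric_Sum{Sum: &metricspb.Sum{
			AggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
			IsMonotonic:            true,
			DataPoints: []*metricspb.NumberDataPoint{
				{
					Attributes:   []*commonpb.KeyValue{strAttr("http.method", "GET")},
					TimeUnixNano: now,
					Value:        &metricspb.NumberDataPoint_AsInt{AsInt: 42},
				},
				{
					Attributes:   []*commonpb.KeyValue{strAttr("http.method", "POST")},
					TimeUnixNano: now,
					Value:        &metricspb.NumberDataPoint_AsInt{AsInt: 7},
				},
			},
		}},
	}
}
```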
@@ -875,6 +883,8 @@ type ExponentialHistogram struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` // aggregation_temporality describes if the aggregator reports delta changes // since last report time, or cumulative changes since a fixed start time. @@ -941,6 +951,8 @@ type Summary struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // The time series data points. + // Note: Multiple time series may be included (same timestamp, different attributes). DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -994,6 +1006,7 @@ type NumberDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1144,6 +1157,7 @@ type HistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1331,6 +1345,7 @@ type ExponentialHistogramDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1343,11 +1358,11 @@ type ExponentialHistogramDataPoint struct { // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January // 1970. TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // count is the number of values in the population. Must be + // The number of values in the population. Must be // non-negative. This value must be equal to the sum of the "bucket_counts" // values in the positive and negative Buckets plus the "zero_count" field. Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` - // sum of the values in the population. If count is zero then this field + // The sum of the values in the population. If count is zero then this field // must be zero. // // Note: Sum should only be filled out when measuring non-negative discrete @@ -1372,7 +1387,7 @@ type ExponentialHistogramDataPoint struct { // scale is not restricted by the protocol, as the permissible // values depend on the range of the data. 
Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"` - // zero_count is the count of values that are either exactly zero or + // The count of values that are either exactly zero or // within the region considered zero by the instrumentation at the // tolerated degree of precision. This bucket stores values that // cannot be expressed using the standard exponential formula as @@ -1391,9 +1406,9 @@ type ExponentialHistogramDataPoint struct { // (Optional) List of exemplars collected from // measurements that were used to form the data point Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"` - // min is the minimum value over (start_time, end_time]. + // The minimum value over (start_time, end_time]. Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` - // max is the maximum value over (start_time, end_time]. + // The maximum value over (start_time, end_time]. Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` // ZeroThreshold may be optionally set to convey the width of the zero // region. Where the zero region is defined as the closed interval @@ -1546,6 +1561,7 @@ type SummaryDataPoint struct { // where this point belongs. The list may be empty (may contain 0 elements). // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` // StartTimeUnixNano is optional but strongly encouraged, see the // the detailed comments above Metric. @@ -1798,11 +1814,11 @@ type ExponentialHistogramDataPoint_Buckets struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Offset is the bucket index of the first entry in the bucket_counts array. + // The bucket index of the first entry in the bucket_counts array. // // Note: This uses a varint encoding as a simple form of compression. Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - // bucket_counts is an array of count values, where bucket_counts[i] carries + // An array of count values, where bucket_counts[i] carries // the count of the bucket at index (offset+i). bucket_counts[i] is the count // of values greater than base^(offset+i) and less than or equal to // base^(offset+i+1). diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index eb7745d66e0..301247ddfee 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -44,8 +44,9 @@ type Resource struct { // Set of attributes that describe the resource. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // The number of dropped attributes. If the value is 0, then // no attributes were dropped. 
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Set of entities that participate in this Resource. diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index b342a0a9401..d7bfca90299 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -388,7 +388,8 @@ type ScopeSpans struct { // is recorded in. Notably, the last part of the URL path is the version number of the // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all spans and span events in the "spans" field. + // This schema_url applies to the data in the "scope" field and all spans and span + // events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -512,21 +513,21 @@ type Span struct { // two spans with the same name may be distinguished using `CLIENT` (caller) // and `SERVER` (callee) to identify queueing latency associated with the span. Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // start_time_unix_nano is the start time of the span. On the client side, this is the time + // The start time of the span. On the client side, this is the time // kept by the local machine where the span execution starts. On the server side, this // is the time when the server's application handler starts running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the span. On the client side, this is the time + // The end time of the span. On the client side, this is the time // kept by the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes + // A collection of key/value pairs. Note, global attributes // like server name can be set using the resource API. Examples of attributes: // // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" @@ -534,24 +535,23 @@ type Span struct { // "example.com/myattribute": true // "example.com/score": 10.239 // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. 
Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes + // The number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many // attributes. If this value is 0, then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // events is a collection of Event items. + // A collection of Event items. Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - // dropped_events_count is the number of dropped events. If the value is 0, then no + // The number of dropped events. If the value is 0, then no // events were dropped. DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` - // links is a collection of Links, which are references from this span to a span + // A collection of Links, which are references from this span to a span // in the same or different trace. Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` - // dropped_links_count is the number of dropped links after the maximum size was + // The number of dropped links after the maximum size was // enforced. If this value is 0, then no links were dropped. DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` // An optional final status for this span. Semantically when Status isn't set, it means @@ -769,16 +769,17 @@ type Span_Event struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // time_unix_nano is the time the event occurred. + // The time the event occurred. TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` - // name of the event. + // The name of the event. // This field is semantically required to be set to non-empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // attributes is a collection of attribute key/value pairs on the event. + // A collection of attribute key/value pairs on the event. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } @@ -859,11 +860,12 @@ type Span_Link struct { SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` - // attributes is a collection of attribute key/value pairs on the link. + // A collection of attribute key/value pairs on the link. 
// Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). + // The behavior of software that receives duplicated keys can be unpredictable. Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` - // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // The number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // Flags, a bit field. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index de34feb8442..3e3b6306953 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -9,7 +9,6 @@ package oauth2 // import "golang.org/x/oauth2" import ( - "bytes" "context" "errors" "net/http" @@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 42517077c43..fd39be4efdc 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -256,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -613,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index d0a75da572c..120a7b35d1d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1615,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1c37f9fbc45..97a61fc5b84 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34aefc..a0d6d498c4b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f 
IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c126f..dd9c903f9ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba162..384c61ca3a8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c0cb..6384c9831fc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3c09..553c1c6f15e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a6382..b3339f2099a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a3033d0..177091d2bc3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c8761..c5abf156d09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( 
IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb04a..f1f3fadf576 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce2409cf..203ad9c54af 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb578..4b9abcb21a2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e952665..f87983037d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7cf4..64347eb354c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e6af..7d71911718f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go 
b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9ad..50e8e644970 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 1de0ce66691..2079de7b0ed 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -33,17 +33,21 @@ guidelines, there may be valid reasons to do so, but it should be rare. ## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly: +Please read the following carefully to ensure your contributions can be merged +smoothly and quickly. + +### PR Contents - Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that attempt to fix several things at the same time, and if one part of the PR has a problem, that will hold up the entire PR. -- For **speculative changes**, consider opening an issue and discussing it - first. If you are suggesting a behavioral or API change, consider starting - with a [gRFC proposal](https://github.com/grpc/proposal). Many new features - that are not bug fixes will require cross-language agreement. +- If your change does not address an **open issue** with an **agreed + resolution**, consider opening an issue and discussing it first. If you are + suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). Many new features that are not + bug fixes will require cross-language agreement. - If you want to fix **formatting or style**, consider whether your changes are an obvious improvement or might be considered a personal preference. If a @@ -56,16 +60,6 @@ How to get your contributions merged smoothly and quickly: often written as "iff". Please do not make spelling correction changes unless you are certain they are misspellings. -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a GitHub issue if it exists. - -- Maintain a **clean commit history** and use **meaningful commit messages**. - PRs with messy commit histories are difficult to review and won't be merged. - Before sending your PR, ensure your changes are based on top of the latest - `upstream/master` commits, and avoid rebasing in the middle of a code review. - You should **never use `git push -f`** unless absolutely necessary during a - review, as it can interfere with GitHub's tracking of comments. - - **All tests need to be passing** before your change can be merged. We recommend you run tests locally before creating your PR to catch breakages early on: @@ -81,15 +75,80 @@ How to get your contributions merged smoothly and quickly: GitHub, which will trigger a GitHub Actions run that you can use to verify everything is passing. -- If you are adding a new file, make sure it has the **copyright message** +- Note that there are two GitHub actions checks that need not be green: + + 1. We test the freshness of the generated proto code we maintain via the + `vet-proto` check. If the source proto files are updated, but our repo is + not updated, an optional checker will fail. This will be fixed by our team + in a separate PR and will not prevent the merge of your PR. 
+ + 2. We run a checker that will fail if there is any change in dependencies of + an exported package via the `dependencies` check. If new dependencies are + added that are not appropriate, we may not accept your PR (see below). + +- If you are adding a **new file**, make sure it has the **copyright message** template at the top as a comment. You can copy the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number of exceptions. **If your contribution introduces new dependencies**, you will - need a discussion with gRPC-Go maintainers. A GitHub action check will run on - every PR, and will flag any transitive dependency changes from any public - package. + need a discussion with gRPC-Go maintainers. + +### PR Descriptions + +- **PR titles** should start with the name of the component being addressed, or + the type of change. Examples: transport, client, server, round_robin, xds, + cleanup, deps. + +- Read and follow the **guidelines for PR titles and descriptions** here: + https://google.github.io/eng-practices/review/developer/cl-descriptions.html + *particularly* the sections "First Line" and "Body is Informative". + + Note: your PR description will be used as the git commit message in a + squash-and-merge if your PR is approved. We may make changes to this as + necessary. + +- **Does this PR relate to an open issue?** On the first line, please use the + tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or + use `Updates #<issue>` if the PR is related to an open issue, but does not fix + it. Consider filing an issue if one does not already exist. + +- PR descriptions *must* conclude with **release notes** as follows: + + ``` + RELEASE NOTES: + * <component>: <summary> + ``` + + This need not match the PR title. + + The summary must: + + * be something that gRPC users will understand. + + * clearly explain the feature being added, the issue being fixed, or the + behavior being changed, etc. If fixing a bug, be clear about how the bug + can be triggered by an end-user. + + * begin with a capital letter and use complete sentences. + + * be as short as possible to describe the change being made. + + If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or + GitHub-related, use `RELEASE NOTES: n/a`. + +### PR Process + +- Please **self-review** your code changes before sending your PR. This will + prevent simple, obvious errors from causing delays. + +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. - Unless your PR is trivial, you should **expect reviewer comments** that you will need to address before merging. We'll label the PR as `Status: Requires @@ -98,5 +157,3 @@ How to get your contributions merged smoothly and quickly: `stale`, and we will automatically close it after 7 days if we don't hear back from you. Please feel free to ping issues or bugs if you do not get a response within a week. - -- Exceptions to the rules can be made if there's a compelling reason to do so.
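To make the release-notes requirement above concrete, a hypothetical entry that satisfies the stated rules (component prefix, capitalized, a complete end-user-facing sentence):

```
RELEASE NOTES:
* client: Fix a panic that could occur when the resolver returned an empty address list.
```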
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea8899818c2..b4bc3a2bf36 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. +const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. 
+ ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. 
+ mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. 
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. - A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) + newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. 
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. 
+ for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. 
+// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return } - if b.subConn != subConn { + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. 
+ if cancelled { + return + } if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { return } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. 
+ // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. 
- b.subConn.Connect() + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, sd := range b.subConns.Values() { + if !sd.connectionFailedInFirstPass { return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, sd := range b.subConns.Values() { + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. + if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, }) case connectivity.TransientFailure: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: state.ConnectionError}, + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) } - b.state = state.ConnectivityState } -func (b *pickfirstBalancer) Close() { +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) } -func (b *pickfirstBalancer) ExitIdle() { - if b.subConn != nil && b.state == connectivity.Idle { - b.subConn.Connect() - } +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). 
+// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) } type picker struct { @@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - subConn balancer.SubConn + exitIdle func() } func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.subConn.Connect() + i.exitIdle() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go deleted file mode 100644 index 67f315a0dbc..00000000000 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ /dev/null @@ -1,906 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. -package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -// enableHealthListenerKeyType is a unique key type used in resolver -// attributes to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "{disconnection}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. 
- ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMapV2[*scData](), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. -func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. - subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMapV2[*scData] - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. 
-func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. - if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. - b.closeSubConnsLocked() - b.addressList.updateAddrs(nil) - b.resolverErrorLocked(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - var newAddrs []resolver.Address - if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling - // will change the order of endpoints but not touch the order of the - // addresses within each endpoint. - A61 - if cfg.ShuffleAddressList { - endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - - // "Flatten the list by concatenating the ordered list of addresses for - // each of the endpoints, in order." - A61 - for _, endpoint := range endpoints { - newAddrs = append(newAddrs, endpoint.Addresses...) - } - } else { - // Endpoints not set, process addresses until we migrate resolver - // emissions fully to Endpoints. The top channel does wrap emitted - // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split - // endpoints properly. Once all balancers correctly forward endpoints - // down, can delete this else conditional. - newAddrs = state.ResolverState.Addresses - if cfg.ShuffleAddressList { - newAddrs = append([]resolver.Address{}, newAddrs...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - } - - // If an address appears in multiple endpoints or in the same endpoint - // multiple times, we keep it only once. We will create only one SubConn - // for the address because an AddressMap is used to store SubConns. 
- // Not de-duplicating would result in attempting to connect to the same - // SubConn multiple times in the same pass. We don't want this. - newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { - return nil - } - - b.reconcileSubConnsLocked(newAddrs) - // If it's the first resolver update or the balancer was already READY - // (but the new address list does not contain the ready SubConn) or - // CONNECTING, enter CONNECTING. - // We may be in TRANSIENT_FAILURE due to a previous empty address list, - // we should still enter CONNECTING because the sticky TF behaviour - // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported - // due to connectivity failures. - if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.subConn.Shutdown() - } - b.subConns = resolver.NewAddressMapV2[*scData]() -} - -// deDupAddresses ensures that each address appears only once in the slice. -func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMapV2[*scData]() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. 
-// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. -// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. - family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. -// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - -// reconcileSubConnsLocked updates the active subchannels based on a new address -// list from the resolver. It does this by: -// - closing subchannels: any existing subchannels associated with addresses -// that are no longer in the updated list are shut down. -// - removing subchannels: entries for these closed subchannels are removed -// from the subchannel map. -// -// This ensures that the subchannel map accurately reflects the current set of -// addresses received from the name resolver. -func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMapV2[bool]() - for _, addr := range newAddrs { - newAddrsMap.Set(addr, true) - } - - for _, oldAddr := range b.subConns.Keys() { - if _, ok := newAddrsMap.Get(oldAddr); ok { - continue - } - val, _ := b.subConns.Get(oldAddr) - val.subConn.Shutdown() - b.subConns.Delete(oldAddr) - } -} - -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn -// becomes ready, which means that all other subConn must be shutdown. 
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { - if sd.subConn != selected.subConn { - sd.subConn.Shutdown() - } - } - b.subConns = resolver.NewAddressMapV2[*scData]() - b.subConns.Set(selected.addr, selected) -} - -// requestConnectionLocked starts connecting on the subchannel corresponding to -// the current address. If no subchannel exists, one is created. If the current -// subchannel is in TransientFailure, a connection to the next address is -// attempted until a subchannel is found. -func (b *pickfirstBalancer) requestConnectionLocked() { - if !b.addressList.isValid() { - return - } - var lastErr error - for valid := true; valid; valid = b.addressList.increment() { - curAddr := b.addressList.currentAddress() - sd, ok := b.subConns.Get(curAddr) - if !ok { - var err error - // We want to assign the new scData to sd from the outer scope, - // hence we can't use := below. - sd, err = b.newSCData(curAddr) - if err != nil { - // This should never happen, unless the clientConn is being shut - // down. - if b.logger.V(2) { - b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) - } - // Do nothing, the LB policy will be closed soon. - return - } - b.subConns.Set(curAddr, sd) - } - - switch sd.rawConnectivityState { - case connectivity.Idle: - sd.subConn.Connect() - b.scheduleNextConnectionLocked() - return - case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - sd.connectionFailedInFirstPass = true - lastErr = sd.lastErr - continue - case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) - return - - } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { - return - } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. - if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. - b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) -} - -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState - // Previously relevant SubConns can still callback with state updates. 
- // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes this - // SubConn. - if !b.isActiveSCData(sd) { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown - return - } - - // Record a connection attempt when exiting CONNECTING. - if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - b.shutdownRemainingLocked(sd) - if !b.addressList.seekTo(sd.addr) { - // This should not fail as we should have only one SubConn after - // entering READY. The SubConn should be present in the addressList. - b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) - return - } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) - }) - return - } - - // If the LB policy is READY, and it receives a subchannel state change, - // it means that the READY subchannel has failed. - // A SubConn can also transition from CONNECTING directly to IDLE when - // a transport is successfully created, but the connection fails - // before the SubConn can send the notification for READY. We treat - // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. - if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. - b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. 
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) - b.addressList.reset() - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, - }) - return - } - - if b.firstPass { - switch newState.ConnectivityState { - case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. - if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - } - case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } - - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) - } - return - } - - // We have finished the first pass, keep re-connecting failing SubConns. - switch newState.ConnectivityState { - case connectivity.TransientFailure: - b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError - if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: newState.ConnectionError}, - }) - } - // We don't need to request re-resolution since the SubConn already - // does that before reporting TRANSIENT_FAILURE. - // TODO: #7534 - Move re-resolution requests from SubConn into - // pick_first. - case connectivity.Idle: - sd.subConn.Connect() - } -} - -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { - if !sd.connectionFailedInFirstPass { - return - } - } - b.firstPass = false - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: lastErr}, - }) - // Start re-connecting all the SubConns that are already in IDLE. - for _, sd := range b.subConns.Values() { - if sd.rawConnectivityState == connectivity.Idle { - sd.subConn.Connect() - } - } -} - -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. 
- // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. - if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. - if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - exitIdle func() -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.exitIdle() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -// addressList manages sequentially iterating over addresses present in a list -// of endpoints. It provides a 1 dimensional view of the addresses present in -// the endpoints. -// This type is not safe for concurrent access. -type addressList struct { - addresses []resolver.Address - idx int -} - -func (al *addressList) isValid() bool { - return al.idx < len(al.addresses) -} - -func (al *addressList) size() int { - return len(al.addresses) -} - -// increment moves to the next index in the address list. -// This method returns false if it went off the list, true otherwise. -func (al *addressList) increment() bool { - if !al.isValid() { - return false - } - al.idx++ - return al.idx < len(al.addresses) -} - -// currentAddress returns the current address pointed to in the addressList. -// If the list is in an invalid state, it returns an empty address instead. 
-func (al *addressList) currentAddress() resolver.Address { - if !al.isValid() { - return resolver.Address{} - } - return al.addresses[al.idx] -} - -func (al *addressList) reset() { - al.idx = 0 -} - -func (al *addressList) updateAddrs(addrs []resolver.Address) { - al.addresses = addrs - al.reset() -} - -// seekTo returns false if the needle was not found and the current index was -// left unchanged. -func (al *addressList) seekTo(needle resolver.Address) bool { - for ai, addr := range al.addresses { - if !equalAddressIgnoringBalAttributes(&addr, &needle) { - continue - } - al.idx = ai - return true - } - return false -} - -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - -// equalAddressIgnoringBalAttributes returns true is a and b are considered -// equal. This is different from the Equal method on the resolver.Address type -// which considers all fields to determine equality. Here, we only consider -// fields that are meaningful to the SubConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 22045bf3946..22e6e326794 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/endpointsharding" - "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" ) @@ -47,7 +47,7 @@ func (bb builder) Name() string { } func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - childBuilder := balancer.Get(pickfirstleaf.Name).Build + childBuilder := balancer.Get(pickfirst.Name).Build bal := &rrBalancer{ cc: cc, Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), @@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. - ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState), }) } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 948a21ef683..2c760e623f6 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func( if acbw.ccb.cc.dopts.disableHealthCheck { return noOpRegisterHealthListenerFn } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } regHealthLisFn := internal.RegisterClientHealthCheckListener if regHealthLisFn == nil { // The health package is not imported. 
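The addressList type deleted above is a plain cursor over a flattened endpoint list; its contract is easiest to see in isolation. A reduced sketch over strings (resolver.Address contributes nothing to the iteration semantics), mirroring the loop shape of requestConnectionLocked:

```go
package main

import "fmt"

// cursor mirrors the addressList contract: isValid reports whether the
// index is in range, increment advances and reports false once the
// cursor moves past the end, and hasNext says whether another advance
// would still land on an element.
type cursor struct {
	items []string
	idx   int
}

func (c *cursor) isValid() bool { return c.idx < len(c.items) }

func (c *cursor) increment() bool {
	if !c.isValid() {
		return false
	}
	c.idx++
	return c.idx < len(c.items)
}

func (c *cursor) hasNext() bool {
	return c.isValid() && c.idx+1 < len(c.items)
}

func main() {
	c := &cursor{items: []string{"10.0.0.1:443", "10.0.0.2:443"}}
	for valid := true; valid; valid = c.increment() {
		fmt.Printf("connect to %s (hasNext=%v)\n", c.items[c.idx], c.hasNext())
	}
	fmt.Println("list exhausted:", !c.isValid()) // true: end the first pass
}
```

increment deliberately reports false only once the cursor moves past the end, which is what lets the first-pass loop terminate and endFirstPassIfPossibleLocked take over.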
- return noOpRegisterHealthListenerFn - } - cfg := acbw.ac.cc.healthCheckConfig() - if cfg == nil { + channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.") return noOpRegisterHealthListenerFn } return func(ctx context.Context, listener func(balancer.SubConnState)) func() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b1364a03252..42c61cf9fe5 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 3f762285db7..c0c2c9a76ab 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -40,11 +40,12 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. @@ -210,7 +211,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper() - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) @@ -456,7 +458,7 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) cc.channelz = channelz.RegisterChannel(parentChannel, target) - cc.addTraceEvent("created") + cc.addTraceEvent(fmt.Sprintf("created for target %q", target)) } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -621,7 +623,8 @@ type ClientConn struct { channelz *channelz.Channel // Channelz object. resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + metricsRecorderList *istats.MetricsRecorderList + statsHandler stats.Handler // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index c8e337cdda0..06f6c6c70a9 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,8 +44,7 @@ type PerRPCCredentials interface { // A54). 
uri is the URI of the entry point for the request. When supported // by the underlying implementation, ctx can be used for timeout and // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. + // to this call. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae142c4..dadd21e40f9 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -27,8 +27,10 @@ package encoding import ( "io" + "slices" "strings" + "google.golang.org/grpc/encoding/internal" "google.golang.org/grpc/internal/grpcutil" ) @@ -36,6 +38,24 @@ import ( // It is intended for grpc internal use only. const Identity = "identity" +func init() { + internal.RegisterCompressorForTesting = func(c Compressor) func() { + name := c.Name() + curCompressor, found := registeredCompressor[name] + RegisterCompressor(c) + return func() { + if found { + registeredCompressor[name] = curCompressor + return + } + delete(registeredCompressor, name) + grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool { + return s == name + }) + } + } +} + // Compressor is used for compressing and decompressing when sending or // receiving messages. // diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go new file mode 100644 index 00000000000..ee9acb43779 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the encoding package. +package internal + +// RegisterCompressorForTesting registers a compressor in the global compressor +// registry. It returns a cleanup function that should be called at the end +// of the test to unregister the compressor. +// +// This prevents compressors registered in one test from appearing in the +// encoding headers of subsequent tests. 
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func() diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index ceec319dd2f..1ab874c7ad2 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } + // Important: if we remove this Size call then we cannot use + // UseCachedSize in MarshalOptions below. size := proto.Size(vv) + + // MarshalOptions with UseCachedSize allows reusing the result from the + // previous Size call. This is safe here because: + // + // 1. We just computed the size. + // 2. We assume the message is not being mutated concurrently. + // + // Important: If the proto.Size call above is removed, using UseCachedSize + // becomes unsafe and may lead to incorrect marshaling. + // + // For more details, see the doc of UseCachedSize: + // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions + marshalOptions := proto.MarshalOptions{UseCachedSize: true} + if mem.IsBelowBufferPoolingThreshold(size) { - buf, err := proto.Marshal(vv) + buf, err := marshalOptions.Marshal(vv) if err != nil { return nil, err } @@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { } else { pool := mem.DefaultBufferPool() buf := pool.Get(size) - if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil { pool.Put(buf) return nil, err } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a18e..2b57ba65a39 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -75,6 +75,7 @@ const ( MetricTypeIntHisto MetricTypeFloatHisto MetricTypeIntGauge + MetricTypeIntUpDownCount ) // Int64CountHandle is a typed handle for a int count metric. This handle @@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Count(h, incr, labels...) } +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric. +// This handle is passed at the recording point in order to know which metric +// to record on. +type Int64UpDownCountHandle MetricDescriptor + +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 up-down counter value on the metrics recorder provided. +// The value 'v' can be positive to increment or negative to decrement. +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) { + recorder.RecordInt64UpDownCount(h, v, labels...) +} + // Float64CountHandle is a typed handle for a float count metric. This handle is // passed at the recording point in order to know which metric to record on. 
type Float64CountHandle MetricDescriptor @@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { return (*Int64GaugeHandle)(descPtr) } +// RegisterInt64UpDownCount registers the metric description onto the global registry. +// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle { + registerMetric(descriptor.Name, descriptor.Default) + // Set the specific metric type for the up-down counter + descriptor.Type = MetricTypeIntUpDownCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64UpDownCountHandle)(descPtr) +} + // snapshotMetricsRegistryForTesting snapshots the global data of the metrics // registry. Returns a cleanup function that sets the metrics registry to its // original state. diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index ee1423605ab..cb57f1a748b 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -38,6 +38,9 @@ type MetricsRecorder interface { // RecordInt64Gauge records the measurement alongside labels on the int // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) + // RecordInt64UpDownCount records the measurement alongside labels on the int + // up-down count associated with the provided handle. + RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string) } // Metrics is an experimental legacy alias of the now-stable stats.MetricSet. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 22d263fb94b..8f7d9f6bbe6 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 11f91668ac9..467392b8d45 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -83,6 +83,7 @@ func (b *Unbounded) Load() { default: } } else if b.closing && !b.closed { + b.closed = true close(b.c) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 2bffe477768..3b7ba596625 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -194,7 +194,7 @@ func (r RefChannelType) String() string { // If channelz is not turned ON, this will simply log the event descriptions. func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { // Log only the trace description associated with the bottom most entity.
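To make the up-down counter additions in the metricregistry.go and metrics.go hunks above concrete, here is a hypothetical registration and recording flow. The metric name, unit, and label below are invented for illustration, and the MetricDescriptor fields used are assumed from context rather than taken from this patch:

```go
package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// Registered at init time, as the NOTE on RegisterInt64UpDownCount
// requires; registering the same name twice panics. The descriptor
// values here are hypothetical.
var activeStreamsMetric = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
	Name:        "example.active_streams",
	Description: "Number of streams currently open.",
	Unit:        "{stream}",
	Labels:      []string{"grpc.target"},
})

// Open/close paths record +1 and -1 against whatever MetricsRecorder the
// channel supplies (for example the MetricsRecorderList fan-out).
func onStreamOpen(rec estats.MetricsRecorder, target string) {
	activeStreamsMetric.Record(rec, 1, target)
}

func onStreamClose(rec estats.MetricsRecorder, target string) {
	activeStreamsMetric.Record(rec, -1, target)
}
```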
- d := fmt.Sprintf("[%s]%s", e, desc.Desc) + d := fmt.Sprintf("[%s] %s", e, desc.Desc) switch desc.Severity { case CtUnknown, CtInfo: l.InfoDepth(depth+1, d) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7e060f5ed13..91f760936c0 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -52,12 +52,6 @@ var ( // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "false". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) - // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the @@ -75,6 +69,14 @@ var ( // ALTSHandshakerKeepaliveParams is set if we should add the // KeepaliveParams when dial the ALTS handshaker service. ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) + + // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443 + // to a target address that lacks one. This flag only has an effect when all of + // the following conditions are met: + // - A connect proxy is being used. + // - Target resolution is disabled. + // - The DNS resolver is being used. + EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index e87551552ad..7685d08b54d 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -68,4 +68,15 @@ var ( // trust. For more details, see: // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) + + // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata + // configuring use of an HTTP CONNECT proxy via xDS from cluster resources. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md + XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false) + + // XDSBootstrapCallCredsEnabled controls if call credentials can be used in + // xDS bootstrap configuration via the `call_creds` field. 
For more details, + // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md + XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 8e8e861280a..9b6d8a1fa3f 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func (cs *CallbackSerializer) run(ctx context.Context) { defer close(cs.done) - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() + // Close the buffer when the context is canceled + // to prevent new callbacks from being added. + context.AfterFunc(ctx, cs.callbacks.Close) - // Run all pending callbacks. + // Run all callbacks. for cb := range cs.callbacks.Get() { cs.callbacks.Load() cb.(func(context.Context))(ctx) diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 20b8fb098ac..5bfa67b7268 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,11 +22,13 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport/networktype" @@ -40,6 +42,8 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as // an intermediary between the gRPC ClientConn and the child resolvers. 
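The CallbackSerializer rewrite above swaps a polling select loop for context.AfterFunc. Reduced to a self-contained sketch, the shutdown path looks like this; a plain buffered channel stands in for buffer.Unbounded, whose Close (per the unbounded.go fix earlier in this patch) is guarded by a mutex so it is safe against concurrent Put:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// run mirrors the new shape of CallbackSerializer.run: context.AfterFunc
// closes the work queue as soon as ctx is cancelled, and a plain range
// loop then drains whatever was already queued before exiting.
func run(ctx context.Context, work chan func()) {
	context.AfterFunc(ctx, func() { close(work) })
	for f := range work { // exits once work is closed and drained
		f()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	work := make(chan func(), 4)
	work <- func() { fmt.Println("callback 1") }
	work <- func() { fmt.Println("callback 2") }
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // triggers AfterFunc, which closes the queue
	}()
	run(ctx, work)
	fmt.Println("serializer done")
}
```

The range loop exits only after the queue is both closed and drained, matching the "Run all callbacks." comment in the hunk.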
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { r.targetResolverState = &resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } r.updateTargetResolverState(*r.targetResolverState) return r, nil @@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool { return false } +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). +func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + func skipProxy(address resolver.Address) bool { // Avoid proxy when network is not tcp. 
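A few worked examples of the parseTarget helper added above may be useful, since the default-port behavior is gated on three separate conditions. The function body is copied from the hunk; main merely exercises the documented cases:

```go
package main

import (
	"fmt"
	"net"
)

const defaultPort = "443"

// parseTarget is copied from the hunk above; see its doc comment for the
// exact rules.
func parseTarget(target string) (string, error) {
	if target == "" {
		return "", fmt.Errorf("missing address")
	}
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// Port is likely missing: append the default port.
		return net.JoinHostPort(target, defaultPort), nil
	}
	if port == "" {
		return "", fmt.Errorf("missing port after port-separator colon")
	}
	if host == "" {
		host = "localhost"
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	for _, in := range []string{"example.com", "example.com:8080", ":80", "::1", "example.com:"} {
		out, err := parseTarget(in)
		fmt.Printf("%-18q -> %-22q err=%v\n", in, out, err)
	}
}
```

Note the bare-IPv6 case: net.SplitHostPort fails on "::1", so the helper falls through to net.JoinHostPort, which adds the brackets along with the default port, yielding "[::1]:443".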
networkType, ok := networktype.Get(address) diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657be1..d5f7e4d62dd 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// up-down count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 00000000000..49019b80d15 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping.
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index ccc0e017e5e..980452519ea 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. 
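The combinedHandler above lets a transport hold a single nillable stats.Handler instead of ranging over a []stats.Handler at every event site. Because internal/stats cannot be imported from outside the grpc module, this sketch restates the shape with a local stand-in interface (all names illustrative):

```go
package main

import (
	"context"
	"fmt"
)

// handler stands in for grpc's stats.Handler; only the method the demo
// needs is declared, to keep the sketch self-contained.
type handler interface {
	HandleRPC(ctx context.Context, event string)
}

type printHandler struct{ name string }

func (p printHandler) HandleRPC(_ context.Context, event string) {
	fmt.Printf("%s saw %s\n", p.name, event)
}

type combined struct{ handlers []handler }

func (c combined) HandleRPC(ctx context.Context, event string) {
	for _, h := range c.handlers {
		h.HandleRPC(ctx, event)
	}
}

// newCombined mirrors NewCombinedHandler: nil for zero handlers, the
// handler itself for exactly one, and a fan-out wrapper otherwise, so
// event sites test a single nil instead of looping.
func newCombined(hs ...handler) handler {
	switch len(hs) {
	case 0:
		return nil
	case 1:
		return hs[0]
	default:
		return combined{handlers: hs}
	}
}

func main() {
	h := newCombined(printHandler{name: "otel"}, printHandler{name: "channelz"})
	if h != nil {
		h.HandleRPC(context.Background(), "OutHeader")
	}
}
```

Returning the lone handler unwrapped in the one-element case keeps the common single-handler path free of an extra indirection.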
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index a2831e5d01f..2dcd1e63bdd 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -496,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -530,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. } func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } l.estdStreams[h.streamID] = str } @@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } return l.originateStream(str, h) } @@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } - reader := str.reader + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. 
if !dataItem.processing { dataItem.processing = true - str.reader.Reset(dataItem.data) + reader.Reset(dataItem.data) dataItem.data.Free() } // A data item is represented by a dataFrame, since it later translates into @@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) { if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) { remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. + l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224ec8..7cfbc9637b8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. 
ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 3dea2357351..7ab3422b8a2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + s.hdrMu.Lock() + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) return err @@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5467fe9715a..65b4ab2439e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + 
t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -492,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } @@ -556,6 +550,19 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Make the slice of certain predictable size to reduce allocations made by append. 
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) + registeredCompressors := t.registeredCompressors + if callHdr.PreviousAttempts > 0 { + hfLen++ + } + if callHdr.SendCompress != "" { + hfLen++ + } + if registeredCompressors != "" { + hfLen++ + } + if _, ok := ctx.Deadline(); ok { + hfLen++ + } headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) @@ -568,7 +575,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } - registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) // Include the outgoing compressor name when compressor is not registered @@ -811,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -842,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -893,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -990,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. 
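On the new SetReadDeadline call in Close above: alongside the existing ten-second write deadline, it bounds how long a blocked reader goroutine can hold the connection open during shutdown. A toy demonstration of the mechanism on an in-memory connection (durations arbitrary):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	srv, cli := net.Pipe()
	defer srv.Close()

	done := make(chan struct{})
	go func() { // stand-in for the transport's reader goroutine
		defer close(done)
		buf := make([]byte, 1)
		_, err := cli.Read(buf) // blocks: the peer never writes
		fmt.Println("reader unblocked:", err)
	}()

	time.Sleep(10 * time.Millisecond)
	// The Close path: bound how long the blocked Read may linger.
	_ = cli.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	<-done
	cli.Close()
}
```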
if t.state == closing { @@ -1051,11 +1056,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1166,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1210,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. Record that @@ -1465,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string recvCompress string - httpStatusCode *int httpStatusErr string - rawStatusCode = codes.Unknown + // the code from the grpc-status header, if present + grpcStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string + httpStatus string ) - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - for _, hf := range frame.Fields { switch hf.Name { case "content-type": @@ -1495,31 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } - rawStatusCode = codes.Code(uint32(code)) + grpcStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case ":status": - if hf.Value == "200" { - httpStatusErr = "" - statusCode := 200 - httpStatusCode = &statusCode - break - } - - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - httpStatusCode = &statusCode - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) + httpStatus = hf.Value default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1534,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - 
- if httpStatusCode != nil {
+ // If a non-gRPC response is received, then evaluate the HTTP status to
+ // process the response and close the stream.
+ // In case the HTTP status doesn't provide any error information (status: 200),
+ // then evaluate the response code as Unknown.
+ if !isGRPC {
+ var grpcErrorCode = codes.Internal
+ if httpStatus == "" {
+ httpStatusErr = "malformed header: missing HTTP status"
+ } else {
+ // Parse the status code (e.g. "200", "404").
+ statusCode, err := strconv.Atoi(httpStatus)
+ if err != nil {
+ se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ if statusCode >= 100 && statusCode < 200 {
+ if endStream {
+ se := status.New(codes.Internal, fmt.Sprintf(
+ "protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ }
+ // In case of informational headers, return.
+ return
+ }
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
 var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
+ grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
 if !ok {
- code = codes.Unknown
+ grpcErrorCode = codes.Unknown
 }
 }
 var errs []string
 if httpStatusErr != "" {
 errs = append(errs, httpStatusErr)
 }
+
 if contentTypeErr != "" {
 errs = append(errs, contentTypeErr)
 }
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
+
+ se := status.New(grpcErrorCode, strings.Join(errs, "; "))
 t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
 return
 }
@@ -1583,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 }
 }
- for _, sh := range t.statsHandlers {
+ if t.statsHandler != nil {
 if !endStream {
- inHeader := &stats.InHeader{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
 Client: true,
 WireLength: int(frame.Header().Length),
 Header: metadata.MD(mdata).Copy(),
 Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ })
 } else {
- inTrailer := &stats.InTrailer{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
 Client: true,
 WireLength: int(frame.Header().Length),
 Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
+ })
 }
 }
@@ -1606,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
 return
 }
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+ status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
 // If client received END_STREAM from server while stream was still active,
 // send RST_STREAM.
@@ -1653,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) {
 // loop to keep reading incoming messages on this transport.
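// An illustrative aside, not part of this patch: the reader loop below owns
// one reference to each parsed DATA frame's buffer and releases it after
// dispatch (frame.data.Free() in the switch further down), while handleData
// takes its own reference before handing the buffer to the stream. A minimal
// standalone sketch of that contract, using only the mem APIs that appear in
// this diff (buffer size and contents are made up):
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// deliver stands in for the stream side, which frees its reference once the
// application has consumed the data.
func deliver(b mem.Buffer) {
	fmt.Println(b.Len())
	b.Free()
}

func main() {
	pool := mem.DefaultBufferPool()
	handle := pool.Get(4096) // *[]byte backed by the pool
	buf := mem.NewBuffer(handle, pool)
	buf.Ref()    // second reference, handed to the stream
	deliver(buf) // stream side frees its reference
	buf.Free()   // parser drops its own; the pool can reclaim at zero
}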
for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1668,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1686,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 9f725e15a81..6f78a6b0c8c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,8 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" @@ -42,7 +44,6 @@ import ( istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -86,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. 
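// Worth noting before the server settings below: the transports in this diff
// now store a single stats.Handler instead of a slice; multiple user-supplied
// handlers are merged once at construction (istats.NewCombinedHandler appears
// later in server.go). A plausible sketch of such a combiner -- the actual
// internal/stats implementation is not shown in this patch and may differ:
package sketch

import (
	"context"

	"google.golang.org/grpc/stats"
)

type combined []stats.Handler

func (c combined) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	for _, h := range c {
		ctx = h.TagRPC(ctx, info)
	}
	return ctx
}

func (c combined) HandleRPC(ctx context.Context, s stats.RPCStats) {
	for _, h := range c {
		h.HandleRPC(ctx, s)
	}
}

func (c combined) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	for _, h := range c {
		ctx = h.TagConn(ctx, info)
	}
	return ctx
}

func (c combined) HandleConn(ctx context.Context, s stats.ConnStats) {
	for _, h := range c {
		h.HandleConn(ctx, s)
	}
}

// newCombined returns nil for zero handlers, so transports keep a cheap nil
// check, and avoids wrapping when there is exactly one handler.
func newCombined(handlers ...stats.Handler) stats.Handler {
	switch len(handlers) {
	case 0:
		return nil
	case 1:
		return handlers[0]
	}
	return combined(handlers)
}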
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, @@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. 
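// A quick worked example of the padding accounting just above: for a padded
// DATA frame, the header's Length counts the pad-length octet, the data, and
// the padding, while only the data itself is delivered to the stream. The
// difference is credited straight back as a window update. Numbers are
// illustrative:
package main

import "fmt"

func main() {
	const (
		size    = 1024 // f.Header().Length: 1 pad-length byte + data + padding
		dataLen = 900  // f.data.Len(): bytes actually handed to the stream
	)
	// 124 bytes (the pad-length octet plus 123 padding bytes) never reach
	// the application, so the receiver restores that much window at once.
	fmt.Println(size - dataLen) // 124
}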
- pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. - outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } @@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { @@ -1353,10 +1343,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - oldState := s.swapState(streamDone) - if oldState == streamDone { - return - } + // We can't return early even if the stream's state is "done" as the state + // might have been set by the `finishStream` method. Deleting the stream via + // `finishStream` can get blocked on flow control. 
+ s.swapState(streamDone) t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index e3663f87f39..6209eb23cdc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,35 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. + pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { + if memPool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream + // is always initialized with a BufferPool. + memPool = mem.DefaultBufferPool() + } + if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. The returned Frame is only valid +// until the next call to readFrame. 
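// The hand-rolled header in writeData above is the standard 9-octet HTTP/2
// frame header: a 24-bit big-endian length, one type byte, one flags byte,
// and a 31-bit stream ID whose reserved high bit stays zero. A standalone
// sketch (not part of this patch) that produces the same bytes:
package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func appendFrameHeader(dst []byte, length uint32, typ http2.FrameType, flags http2.Flags, streamID uint32) []byte {
	return append(dst,
		byte(length>>16), byte(length>>8), byte(length), // 24-bit length
		byte(typ),   // frame type
		byte(flags), // flags
		byte(streamID>>24), byte(streamID>>16), byte(streamID>>8), byte(streamID), // 31-bit stream ID
	)
}

func main() {
	hdr := appendFrameHeader(nil, 5, http2.FrameData, http2.FlagDataEndStream, 1)
	fmt.Printf("% x\n", hdr) // 00 00 05 00 01 00 00 00 01
}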
+func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). + var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. 
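// Why the write-back through poolHandle on the next line is safe: trimming
// the pad-length octet and the padding only re-slices buf, so it remains a
// prefix of the original pooled allocation and BufferPool.Put can still see
// the full capacity (the contract documented in mem/buffer_pool.go below).
// A small sketch, outside this patch, with made-up sizes:
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	handle := pool.Get(8)   // len(*handle) == 8
	*handle = (*handle)[:5] // drop 3 trailing padding bytes; same backing array
	fmt.Println(len(*handle), cap(*handle) >= 8) // 5 true
	pool.Put(handle) // safe: the slice is still a prefix of the allocation
}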
+ *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index cf8da0b52d0..ed6a13b7501 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,7 +32,7 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) @@ -43,12 +43,13 @@ type ServerStream struct { // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. @@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7dd53e80a75..5ff83a7d7d7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. 
+ err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { - id uint32 ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream recvCompress string sendCompress string - buf *recvBuffer - trReader *transportReader - fc *inFlow - wq *writeQuota - - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - state streamState + readRequester readRequester // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. + + // Non-pointer fields are at the end to optimize GC performance. + state streamState + id uint32 + buf recvBuffer + trReader transportReader + fc inFlow + wq writeQuota +} + +// readRequester is used to state application's intentions to read data. This +// is used to adjust flow control, if needed. +type readRequester interface { + requestRead(int) } func (s *Stream) swapState(st streamState) streamState { @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { if er := s.trReader.er; er != nil { return er } - s.requestRead(len(header)) + s.readRequester.requestRead(len(header)) for len(header) != 0 { n, err := s.trReader.ReadMessageHeader(header) header = header[n:] @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { if er := s.trReader.er; er != nil { return nil, er } - s.requestRead(n) + s.readRequester.requestRead(n) for n != 0 { buf, err := s.trReader.Read(n) var bufLen int @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { return data, nil } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. 
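// The noCopy marker declared next is the standard go vet device: because it
// has pointer-receiver Lock/Unlock methods, the copylocks analyzer reports
// any by-value copy of a struct that embeds it. That protection matters now
// that recvBufferReader (above) is stored by value inside other structs. A
// tiny illustration of what vet catches (hypothetical types):
package main

type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

type reader struct {
	_   noCopy
	pos int
}

func main() {
	var r reader
	r2 := r // go vet: assignment copies lock value to r2: main.reader contains main.noCopy
	_ = r2
}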
+type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader *recvBufferReader + _ noCopy // The handler to control the window update procedure for both this // particular stream and the associated transport. - windowHandler func(int) + windowHandler windowHandler er error + reader recvBufferReader +} + +// The handler to control the window update procedure for both this +// particular stream and the associated transport. +type windowHandler interface { + updateWindow(int) } func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(n) + t.windowHandler.updateWindow(n) return n, nil } @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { t.er = err return buf, err } - t.windowHandler(buf.Len()) + t.windowHandler.updateWindow(buf.Len()) return buf, nil } @@ -454,7 +478,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandlers []stats.Handler + StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -615,6 +639,8 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() + adjustWindow(s *ServerStream, n uint32) + updateWindow(s *ServerStream, n uint32) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index c37c58c0233..f211e727451 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -32,6 +32,9 @@ type BufferPool interface { Get(length int) *[]byte // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. 
Put(*[]byte) } @@ -118,7 +121,11 @@ type sizedBufferPool struct { } func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } b := *buf clear(b[:cap(b)]) *buf = b[:size] @@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) { func newSizedBufferPool(size int) *sizedBufferPool { return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, defaultSize: size, } } @@ -160,6 +161,7 @@ type simpleBufferPool struct { func (p *simpleBufferPool) Get(size int) *[]byte { bs, ok := p.pool.Get().(*[]byte) if ok && cap(*bs) >= size { + clear((*bs)[:cap(*bs)]) *bs = (*bs)[:size] return bs } diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index af510d20c5a..084fb19c6d1 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,6 +19,7 @@ package mem import ( + "fmt" "io" ) @@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { // Reader returns a new Reader for the input slice after taking references to // each underlying buffer. -func (s BufferSlice) Reader() Reader { +func (s BufferSlice) Reader() *Reader { s.Ref() - return &sliceReader{ + return &Reader{ data: s, len: s.Len(), } } // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. +// with other systems. +// // Buffers will be freed as they are read. -type Reader interface { - io.Reader - io.ByteReader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. - Remaining() int - // Reset frees the currently held buffer slice and starts reading from the - // provided slice. This allows reusing the reader object. - Reset(s BufferSlice) -} - -type sliceReader struct { +// +// A Reader can be constructed from a BufferSlice; alternatively the zero value +// of a Reader may be used after calling Reset on it. +type Reader struct { data BufferSlice len int // The index into data[0].ReadOnlyData(). bufferIdx int } -func (r *sliceReader) Remaining() int { +// Remaining returns the number of unread bytes remaining in the slice. +func (r *Reader) Remaining() int { return r.len } -func (r *sliceReader) Reset(s BufferSlice) { +// Reset frees the currently held buffer slice and starts reading from the +// provided slice. This allows reusing the reader object. +func (r *Reader) Reset(s BufferSlice) { r.data.Free() s.Ref() r.data = s @@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) { r.bufferIdx = 0 } -func (r *sliceReader) Close() error { +// Close frees the underlying BufferSlice and never returns an error. Subsequent +// calls to Read will return (0, io.EOF). 
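// A side note on the sizedBufferPool change above: with the New hook removed
// from newSizedBufferPool, an empty sync.Pool returns an untyped nil, the
// type assertion in Get reports !ok, and a fresh slice is allocated at the
// pool's preferred capacity. A generic sketch of the pattern (the real pool
// also zeroes recycled memory before handing it out):
package main

import (
	"fmt"
	"sync"
)

type slicePool struct {
	pool        sync.Pool // no New: Get may yield nil
	defaultSize int
}

func (p *slicePool) Get(size int) *[]byte {
	buf, ok := p.pool.Get().(*[]byte)
	if !ok || cap(*buf) < size {
		capacity := p.defaultSize
		if capacity < size {
			capacity = size
		}
		b := make([]byte, size, capacity)
		return &b
	}
	*buf = (*buf)[:size]
	return buf
}

func (p *slicePool) Put(buf *[]byte) { p.pool.Put(buf) }

func main() {
	p := &slicePool{defaultSize: 1024}
	b := p.Get(16)
	fmt.Println(len(*b), cap(*b)) // 16 1024
	p.Put(b)
}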
+func (r *Reader) Close() error { r.data.Free() r.data = nil r.len = 0 return nil } -func (r *sliceReader) freeFirstBufferIfEmpty() bool { +func (r *Reader) freeFirstBufferIfEmpty() bool { if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { return false } @@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool { return true } -func (r *sliceReader) Read(buf []byte) (n int, _ error) { +func (r *Reader) Read(buf []byte) (n int, _ error) { if r.len == 0 { return 0, io.EOF } @@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) { return n, nil } -func (r *sliceReader) ReadByte() (byte, error) { +// ReadByte reads a single byte. +func (r *Reader) ReadByte() (byte, error) { if r.len == 0 { return 0, io.EOF } @@ -290,3 +287,59 @@ nextBuffer: } } } + +// Discard skips the next n bytes, returning the number of bytes discarded. +// +// It frees buffers as they are fully consumed. +// +// If Discard skips fewer than n bytes, it also returns an error. +func (r *Reader) Discard(n int) (discarded int, err error) { + total := n + for n > 0 && r.len > 0 { + curData := r.data[0].ReadOnlyData() + curSize := min(n, len(curData)-r.bufferIdx) + n -= curSize + r.len -= curSize + r.bufferIdx += curSize + if r.bufferIdx >= len(curData) { + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + } + } + discarded = total - n + if n > 0 { + return discarded, fmt.Errorf("insufficient bytes in reader") + } + return discarded, nil +} + +// Peek returns the next n bytes without advancing the reader. +// +// Peek appends results to the provided res slice and returns the updated slice. +// This pattern allows re-using the storage of res if it has sufficient +// capacity. +// +// The returned subslices are views into the underlying buffers and are only +// valid until the reader is advanced past the corresponding buffer. +// +// If Peek returns fewer than n bytes, it also returns an error. +func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) { + for i := 0; n > 0 && i < len(r.data); i++ { + curData := r.data[i].ReadOnlyData() + start := 0 + if i == 0 { + start = r.bufferIdx + } + curSize := min(n, len(curData)-start) + if curSize == 0 { + continue + } + res = append(res, curData[start:start+curSize]) + n -= curSize + } + if n > 0 { + return nil, fmt.Errorf("insufficient bytes in reader") + } + return res, nil +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969af4..1e783febf92 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { } // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 47ea09f5c9b..6b04c9e8735 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -657,8 +657,20 @@ type streamReader interface { Read(n int) (mem.BufferSlice, error) } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. 
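// A usage sketch for the Discard and Peek methods added above (not part of
// this patch; the data is throwaway and SliceBuffer keeps pools out of it):
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	s := mem.BufferSlice{
		mem.SliceBuffer([]byte("hello ")),
		mem.SliceBuffer([]byte("world")),
	}
	r := s.Reader()
	views, err := r.Peek(8, nil) // views alias the first two buffers
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", views) // ["hello " "wo"]
	if _, err := r.Discard(8); err != nil { // now advance past them
		panic(err)
	}
	fmt.Println(r.Remaining()) // 3
	r.Close() // frees whatever is left
}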
+type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // parser reads complete gRPC messages from the underlying reader. type parser struct { + _ noCopy // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. @@ -949,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR // Information about RPC type rpcInfo struct { failfast bool - preloaderInfo *compressorInfo + preloaderInfo compressorInfo } // Information about Preloader @@ -968,7 +980,7 @@ type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, - preloaderInfo: &compressorInfo{ + preloaderInfo: compressorInfo{ codec: codec, cp: cp, comp: comp, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 1da2a542acd..ddd37734119 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -124,7 +124,8 @@ type serviceInfo struct { // Server is a gRPC server to serve RPC requests. type Server struct { - opts serverOptions + opts serverOptions + statsHandler stats.Handler mu sync.Mutex // guards following lis map[net.Listener]bool @@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - channelz: channelz.RegisterServer(""), + lis: make(map[net.Listener]bool), + opts: opts, + statsHandler: istats.NewCombinedHandler(opts.statsHandlers...), + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, + StatsHandler: s.statsHandler, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { ctx = transport.SetConnection(ctx, rawConn) ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + if s.statsHandler != nil { + ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{ RemoteAddr: st.Peer().Addr, LocalAddr: st.Peer().LocalAddr, }) - sh.HandleConn(ctx, &stats.ConnBegin{}) + s.statsHandler.HandleConn(ctx, &stats.ConnBegin{}) } defer func() { st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) + if s.statsHandler != nil { + s.statsHandler.HandleConn(ctx, &stats.ConnEnd{}) } }() @@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } err = stream.Write(hdr, payload, opts) - if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } - } + if err == nil && s.statsHandler != nil { + s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) } return err } @@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + sh := s.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: false, IsServerStream: false, } @@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.Finish() } - for _, sh := range shs { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { + if sh != nil || len(binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { + if sh != nil { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if channelz.IsOn() { s.incrCallsStarted() } - shs := s.opts.statsHandlers + sh := s.statsHandler var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } + sh.HandleRPC(ctx, statsBegin) } ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: shs, + statsHandler: sh, } - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if sh != nil || trInfo != nil || channelz.IsOn() { // See 
comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.mu.Unlock() } - if len(shs) != 0 { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser method := sm[pos+1:] // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { + if s.statsHandler != nil { md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + s.statsHandler.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d9bbd4c57cf..ca87ff9776e 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cc.NewStream(ctx, desc, method, opts...) } +var emptyMethodConfig = serviceconfig.MethodConfig{} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { // Start tracking the RPC for idleness purposes. This is where a stream is // created for both streaming and unary RPCs, and hence is a good place to @@ -217,7 +219,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return nil, err } - var mc serviceconfig.MethodConfig + mc := &emptyMethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) 
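// The emptyMethodConfig indirection above exists so that mc can start as a
// pointer to one shared, never-mutated zero value and only be repointed when
// a config selector supplies a real MethodConfig; the common no-config path
// then copies nothing per call. The general shape, with a hypothetical
// Config type standing in for serviceconfig.MethodConfig:
package main

import "fmt"

type Config struct {
	WaitForReady *bool
}

// emptyConfig is shared by every caller and must never be written to.
var emptyConfig = Config{}

func lookup(overrides map[string]*Config, method string) *Config {
	if c, ok := overrides[method]; ok {
		return c
	}
	return &emptyConfig // no per-call copy or allocation
}

func main() {
	wfr := true
	overrides := map[string]*Config{"/pkg.Svc/Get": {WaitForReady: &wfr}}
	fmt.Println(lookup(overrides, "/pkg.Svc/Get").WaitForReady != nil) // true
	fmt.Println(lookup(overrides, "/pkg.Svc/Other") == &emptyConfig)   // true
}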
@@ -240,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if rpcConfig.Context != nil { ctx = rpcConfig.Context } - mc = rpcConfig.MethodConfig + mc = &rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted if rpcConfig.Interceptor != nil { rpcInfo.Context = nil @@ -258,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -325,7 +327,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs := &clientStream{ callHdr: callHdr, ctx: ctx, - methodConfig: &mc, + methodConfig: mc, opts: opts, callInfo: callInfo, cc: cc, @@ -418,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay}) + sh := cs.cc.statsHandler + if sh != nil { beginTime = time.Now() - begin := &stats.Begin{ + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: method, FailFast: cs.callInfo.failFast, + NameResolutionDelay: cs.nameResolutionDelay, + }) + sh.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: cs.callInfo.failFast, IsClientStream: cs.desc.ClientStreams, IsServerStream: cs.desc.ServerStreams, IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) + }) } var trInfo *traceInfo @@ -461,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) beginTime: beginTime, cs: cs, decompressorV0: cs.cc.dopts.dc, - statsHandlers: shs, + statsHandler: sh, trInfo: trInfo, }, nil } @@ -482,10 +486,8 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } - if pick.blocked { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) - } + if pick.blocked && a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) } return nil } @@ -529,7 +531,7 @@ func (a *csAttempt) newStream() error { } a.transportStream = s a.ctx = s.Context() - a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -549,6 +551,8 @@ type clientStream struct { sentLast bool // sent an end stream + receivedFirstMsg bool // set after the first message is received + methodConfig *MethodConfig ctx context.Context // the application's context, wrapped by stats/tracing @@ -599,7 +603,7 @@ type csAttempt struct { cs *clientStream transport transport.ClientTransport transportStream *transport.ClientStream - parser *parser + 
parser *parser
+ parser parser
 pickResult balancer.PickResult
 finished bool
@@ -613,8 +617,8 @@ type csAttempt struct {
 // and cleared when the finish method is called.
 trInfo *traceInfo
- statsHandlers []stats.Handler
- beginTime time.Time
+ statsHandler stats.Handler
+ beginTime time.Time
 // set for newStream errors that may be transparently retried
 allowTransparentRetry bool
@@ -1108,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
 }
 return io.EOF
 }
- if len(a.statsHandlers) != 0 {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
- }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
 }
 return nil
 }
 func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 cs := a.cs
- if len(a.statsHandlers) != 0 && payInfo == nil {
+ if a.statsHandler != nil && payInfo == nil {
 payInfo = &payloadInfo{}
 defer payInfo.free()
 }
@@ -1139,16 +1141,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 // Only initialize this state once per stream.
 a.decompressorSet = true
 }
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
 if err == io.EOF {
 if statusErr := a.transportStream.Status().Err(); statusErr != nil {
 return statusErr
 }
+ // Received no message and an OK status for a non-server-streaming RPC.
+ if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
 return io.EOF // indicates successful end of stream.
 }
 return toRPCErr(err)
 }
+ cs.receivedFirstMsg = true
 if a.trInfo != nil {
 a.mu.Lock()
 if a.trInfo.tr != nil {
 }
 a.mu.Unlock()
 }
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.InPayload{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
 Client: true,
 RecvTime: time.Now(),
 Payload: m,
@@ -1172,12 +1179,12 @@
 }
 // Special handling for non-server-stream rpcs.
 // This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
 return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 } else if err != nil {
 return toRPCErr(err)
 }
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 func (a *csAttempt) finish(err error) {
@@ -1210,15 +1217,14 @@ func (a *csAttempt) finish(err error) {
 ServerLoad: balancerload.Parse(tr),
 })
 }
- for _, sh := range a.statsHandlers {
- end := &stats.End{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.End{
 Client: true,
 BeginTime: a.beginTime,
 EndTime: time.Now(),
 Trailer: tr,
 Error: err,
- }
- sh.HandleRPC(a.ctx, end)
+ })
 }
 if a.trInfo != nil && a.trInfo.tr != nil {
 if err == nil {
@@ -1324,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 return nil, err
 }
 as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 ac.incrCallsStarted()
 if desc != unaryStreamDesc {
 // Listen on stream context to cleanup when the stream context is
@@ -1359,6 +1365,7 @@ type addrConnStream struct {
 transport transport.ClientTransport
 ctx context.Context
 sentLast bool
+ receivedFirstMsg bool
 desc *StreamDesc
 codec baseCodec
 sendCompressorV0 Compressor
@@ -1366,7 +1373,7 @@ type addrConnStream struct {
 decompressorSet bool
 decompressorV0 Decompressor
 decompressorV1 encoding.Compressor
- parser *parser
+ parser parser
 // mu guards finished and is held for the entire finish method.
 mu sync.Mutex
@@ -1479,15 +1486,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 // Only initialize this state once per stream.
 as.decompressorSet = true
 }
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
 if err == io.EOF {
 if statusErr := as.transportStream.Status().Err(); statusErr != nil {
 return statusErr
 }
+ // Received no message and an OK status for a non-server-streaming RPC.
+ if !as.desc.ServerStreams && !as.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
 return io.EOF // indicates successful end of stream.
 }
 return toRPCErr(err)
 }
+ as.receivedFirstMsg = true
 if as.desc.ServerStreams {
 // Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1496,12 +1508,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 // Special handling for non-server-stream rpcs.
 // This recv expects EOF or errors, so we don't collect inPayload.
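// The receivedFirstMsg flag threaded through this file closes a gap for
// unary and client-streaming calls: a server that ends the stream with an
// OK status but no response message must produce an Internal error, not a
// zero-value reply. The decision, reduced to a sketch:
package main

import (
	"errors"
	"fmt"
	"io"
)

func eofOutcome(serverStreams, receivedFirstMsg bool) error {
	if !serverStreams && !receivedFirstMsg {
		// Status OK but no message ever arrived: cardinality violation.
		return errors.New("cardinality violation: received no response message from non-server-streaming RPC")
	}
	return io.EOF // normal, successful end of stream
}

func main() {
	fmt.Println(eofOutcome(false, false)) // cardinality violation: ...
	fmt.Println(eofOutcome(false, true))  // EOF
	fmt.Println(eofOutcome(true, false))  // EOF: an empty server stream is legal
}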
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
 return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
 } else if err != nil {
 return toRPCErr(err)
 }
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 func (as *addrConnStream) finish(err error) {
@@ -1584,7 +1596,7 @@ type ServerStream interface {
 type serverStream struct {
 ctx context.Context
 s *transport.ServerStream
- p *parser
+ p parser
 codec baseCodec
 desc *StreamDesc
@@ -1601,7 +1613,7 @@ type serverStream struct {
 maxSendMessageSize int
 trInfo *traceInfo
- statsHandler []stats.Handler
+ statsHandler stats.Handler
 binlogs []binarylog.MethodLogger
 // serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1737,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 binlog.Log(ss.ctx, sm)
 }
 }
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
 }
 return nil
 }
@@ -1771,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 }
 }()
 var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+ if ss.statsHandler != nil || len(ss.binlogs) != 0 {
 payInfo = &payloadInfo{}
 defer payInfo.free()
 }
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
 if err == io.EOF {
 if len(ss.binlogs) != 0 {
 chc := &binarylog.ClientHalfClose{}
@@ -1795,16 +1805,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 return toRPCErr(err)
 }
 ss.recvFirstMsg = true
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
 }
 if len(ss.binlogs) != 0 {
 cm := &binarylog.ClientMessage{
@@ -1821,7 +1829,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 }
 // Special handling for non-client-stream rpcs.
 // This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { return nil } else if err != nil { return err diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bc1eb290f69..9e6d018fb7f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.75.0" +const Version = "1.77.0" diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 688aabe434e..dbcf90b871f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -72,9 +72,10 @@ type ( EditionFeatures EditionFeatures } FileL2 struct { - Options func() protoreflect.ProtoMessage - Imports FileImports - Locations SourceLocations + Options func() protoreflect.ProtoMessage + Imports FileImports + OptionImports func() protoreflect.FileImports + Locations SourceLocations } // EditionFeatures is a frequently-instantiated struct, so please take care @@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } - -// Not exported and just used to reconstruct the original FileDescriptor proto -func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD func (fd *File) ProtoType(protoreflect.FileDescriptor) {} func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +// The next two are not part of the FileDescriptor interface. They are just used to reconstruct +// the original FileDescriptor proto. 
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 688aabe434e..dbcf90b871f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -72,9 +72,10 @@ type (
 		EditionFeatures EditionFeatures
 	}
 	FileL2 struct {
-		Options   func() protoreflect.ProtoMessage
-		Imports   FileImports
-		Locations SourceLocations
+		Options       func() protoreflect.ProtoMessage
+		Imports       FileImports
+		OptionImports func() protoreflect.FileImports
+		Locations     SourceLocations
 	}

 	// EditionFeatures is a frequently-instantiated struct, so please take care
@@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
 func (fd *File) Parent() protoreflect.Descriptor         { return nil }
 func (fd *File) Index() int                              { return 0 }
 func (fd *File) Syntax() protoreflect.Syntax             { return fd.L1.Syntax }
-
-// Not exported and just used to reconstruct the original FileDescriptor proto
-func (fd *File) Edition() int32                  { return int32(fd.L1.Edition) }
-func (fd *File) Name() protoreflect.Name         { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool             { return false }
+func (fd *File) Name() protoreflect.Name                 { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName         { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool                     { return false }
 func (fd *File) Options() protoreflect.ProtoMessage {
 	if f := fd.lazyInit().Options; f != nil {
 		return f()
@@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD
 func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
 func (fd *File) ProtoInternal(pragma.DoNotImplement)   {}

+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+	if f := fd.lazyInit().OptionImports; f != nil {
+		return f()
+	}
+	return emptyFiles
+}
+
 func (fd *File) lazyInit() *FileL2 {
 	if atomic.LoadUint32(&fd.once) == 0 {
 		fd.lazyInitOnce()
@@ -182,9 +190,9 @@ type (
 		L2 *EnumL2 // protected by fileDesc.once
 	}
 	EnumL1 struct {
-		eagerValues bool // controls whether EnumL2.Values is already populated
-
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		eagerValues     bool // controls whether EnumL2.Values is already populated
 	}
 	EnumL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names       { return &ed.lazyInit()
 func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
 func (ed *Enum) Format(s fmt.State, r rune)              { descfmt.FormatDesc(s, r, ed) }
 func (ed *Enum) ProtoType(protoreflect.EnumDescriptor)   {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
 func (ed *Enum) lazyInit() *EnumL2 {
 	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return ed.L2
@@ -244,13 +257,13 @@ type (
 		L2 *MessageL2 // protected by fileDesc.once
 	}
 	MessageL1 struct {
-		Enums        Enums
-		Messages     Messages
-		Extensions   Extensions
-		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
-		IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+		Enums           Enums
+		Messages        Messages
+		Extensions      Extensions
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		IsMapEntry      bool // promoted from google.protobuf.MessageOptions
+		IsMessageSet    bool // promoted from google.protobuf.MessageOptions
 	}
 	MessageL2 struct {
 		Options func() protoreflect.ProtoMessage
@@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors     { return &md.L
 func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
 func (md *Message) ProtoType(protoreflect.MessageDescriptor)      {}
 func (md *Message) Format(s fmt.State, r rune)                    { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
 func (md *Message) lazyInit() *MessageL2 {
 	md.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return md.L2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f549497eb..e91860f5a21 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
 			case genid.EnumDescriptorProto_Value_field_number:
 				numValues++
 			}
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.EnumDescriptorProto_Visibility_field_number:
+				ed.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
@@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
 				md.unmarshalSeedOptions(v)
 			}
 			prevField = num
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.DescriptorProto_Visibility_field_number:
+				md.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index d4c94458bd9..dd31faaeb0a 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {

 	var enumIdx, messageIdx, extensionIdx, serviceIdx int
 	var rawOptions []byte
+	var optionImports []string
 	fd.L2 = new(FileL2)
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
@@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
 				imp = PlaceholderFile(path)
 			}
 			fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+		case genid.FileDescriptorProto_OptionDependency_field_number:
+			optionImports = append(optionImports, sb.MakeString(v))
 		case genid.FileDescriptorProto_EnumType_field_number:
 			fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
 			enumIdx++
@@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
 		}
 	}
 	fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+	if len(optionImports) > 0 {
+		var imps FileImports
+		var once sync.Once
+		fd.L2.OptionImports = func() protoreflect.FileImports {
+			once.Do(func() {
+				imps = make(FileImports, len(optionImports))
+				for i, path := range optionImports {
+					imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+					if imp == nil {
+						imp = PlaceholderFile(path)
+					}
+					imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+				}
+			})
+			return &imps
+		}
+	}
 }

 func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
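
The desc_lazy.go hunk above resolves option imports only on first use, memoized behind a sync.Once so the registry lookup runs at most once. The same accessor shape in isolation (makeLazy and resolve are illustrative names, not from the vendored code):

// Illustrative only: the lazily-memoized accessor pattern used for
// fd.L2.OptionImports above. resolve() stands in for the registry lookup.
package main

import (
	"fmt"
	"sync"
)

func makeLazy(paths []string, resolve func(string) string) func() []string {
	var out []string
	var once sync.Once
	return func() []string {
		once.Do(func() {
			out = make([]string, len(paths))
			for i, p := range paths {
				out[i] = resolve(p)
			}
		})
		return out
	}
}

func main() {
	lazy := makeLazy([]string{"a.proto", "b.proto"}, func(p string) string {
		return "resolved:" + p // runs once, on first call
	})
	fmt.Println(lazy())
	fmt.Println(lazy()) // second call reuses the memoized slice
}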
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index a0aad2777f3..66ba906806f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -13,8 +13,10 @@ import (
 	"google.golang.org/protobuf/reflect/protoreflect"
 )

-var defaultsCache = make(map[Edition]EditionFeatures)
-var defaultsKeys = []Edition{}
+var (
+	defaultsCache = make(map[Edition]EditionFeatures)
+	defaultsKeys  = []Edition{}
+)

 func init() {
 	unmarshalEditionDefaults(editiondefaults.Defaults)
@@ -41,7 +43,7 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
 			b = b[m:]
 			parent.StripEnumPrefix = int(v)
 		default:
-			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
 		}
 	}
 	return parent
@@ -76,7 +78,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			// DefaultSymbolVisibility is enforced in protoc, runtimes should not
 			// inspect this value.
 		default:
-			panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
 		}
 	case protowire.BytesType:
 		v, m := protowire.ConsumeBytes(b)
@@ -150,7 +152,7 @@ func unmarshalEditionDefaults(b []byte) {
 			_, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 		default:
-			panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
 		}
 	}
 }
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 697d1c14f3c..77de0f238ce 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (

 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 8
+	Patch      = 10
 	PreRelease = ""
 )
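
The filedesc hunks above (the new VarintType cases in desc_init.go and the unmarshal loops in editions.go) hand-roll protobuf wire decoding with the protowire package. A self-contained sketch of that consume-loop shape (the field number and payload are invented for illustration):

// Illustrative only: decoding a varint field with protowire, mirroring the
// VarintType cases added in desc_init.go. Field number 1 is arbitrary.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field 1 as a varint with value 42, then walk it back.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)

	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			fmt.Printf("field %d = %d\n", num, v)
		default:
			// Skip fields we do not care about, as the vendored code does.
			m := protowire.ConsumeFieldValue(num, typ, b)
			b = b[m:]
		}
	}
}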
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 5f983b6b63a..e07c04e6252 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -596,16 +596,7 @@ func newInformer(clientState Store, options InformerOptions) Controller {
 	// KeyLister, that way resync operations will result in the correct set
 	// of update/delete deltas.
-	var fifo Queue
-	if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
-		fifo = NewRealFIFO(MetaNamespaceKeyFunc, clientState, options.Transform)
-	} else {
-		fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
-			KnownObjects:          clientState,
-			EmitDeltaTypeReplaced: true,
-			Transformer:           options.Transform,
-		})
-	}
+	fifo := newQueueFIFO(clientState, options.Transform)

 	cfg := &Config{
 		Queue: fifo,
@@ -623,3 +614,15 @@ func newInformer(clientState Store, options InformerOptions) Controller {
 	}
 	return New(cfg)
 }
+
+func newQueueFIFO(clientState Store, transform TransformFunc) Queue {
+	if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
+		return NewRealFIFO(MetaNamespaceKeyFunc, clientState, transform)
+	} else {
+		return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+			KnownObjects:          clientState,
+			EmitDeltaTypeReplaced: true,
+			Transformer:           transform,
+		})
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index 9d9e238ccce..a0d7a834aa0 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -270,7 +270,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
 }

 var (
-	_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+	_ = Queue(&DeltaFIFO{})             // DeltaFIFO is a Queue
+	_ = TransformingStore(&DeltaFIFO{}) // DeltaFIFO implements TransformingStore to allow memory optimizations
 )

 var (
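
controller.go and shared_informer.go now build their queue through the shared newQueueFIFO helper, so the InOrderInformers feature gate selects RealFIFO or DeltaFIFO in exactly one place, with the TransformFunc threaded through either way. A typical transform attached by client code looks like the following sketch (withStrippedManagedFields is our name; stripping managed fields is a common example, not something this patch requires):

// Illustrative only: a TransformFunc that drops managedFields before objects
// reach whichever FIFO newQueueFIFO selected.
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

func withStrippedManagedFields(informer cache.SharedIndexInformer) error {
	// SetTransform must be called before the informer is started.
	return informer.SetTransform(func(obj interface{}) (interface{}, error) {
		if accessor, err := meta.Accessor(obj); err == nil {
			accessor.SetManagedFields(nil) // bytes most controllers never read
		}
		return obj, nil
	})
}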
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
index ee9be77278a..6fd43375f60 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -80,7 +80,7 @@ type ReflectorStore interface {

 // TransformingStore is an optional interface that can be implemented by the provided store.
 // If implemented on the provided store reflector will use the same transformer in its internal stores.
 type TransformingStore interface {
-	Store
+	ReflectorStore
 	Transformer() TransformFunc
 }

@@ -726,9 +726,11 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
 		return false
 	}

+	var transformer TransformFunc
 	storeOpts := []StoreOption{}
 	if tr, ok := r.store.(TransformingStore); ok && tr.Transformer() != nil {
-		storeOpts = append(storeOpts, WithTransformer(tr.Transformer()))
+		transformer = tr.Transformer()
+		storeOpts = append(storeOpts, WithTransformer(transformer))
 	}

 	initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name})
@@ -788,7 +790,7 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
 	// we utilize the temporaryStore to ensure independence from the current store implementation.
 	// as of today, the store is implemented as a queue and will be drained by the higher-level
 	// component as soon as it finishes replacing the content.
-	checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, temporaryStore.List)
+	checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, transformer, temporaryStore.List)

 	if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
 		return nil, fmt.Errorf("unable to sync watch-list result: %w", err)
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
index a7e0d9c4368..4119c78a6c0 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
@@ -33,11 +33,11 @@ import (
 //
 // Note that this function will panic when data inconsistency is detected.
 // This is intentional because we want to catch it in the CI.
-func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
+func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], listItemTransformFunc func(interface{}) (interface{}, error), retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
 	if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() {
 		return
 	}
 	// for informers we pass an empty ListOptions because
 	// listFn might be wrapped for filtering during informer construction.
-	consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn)
+	consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, listItemTransformFunc, metav1.ListOptions{}, retrieveItemsFn)
 }
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index 99e5fcd1800..1c12aa2d64a 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -539,16 +539,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) {
 	s.startedLock.Lock()
 	defer s.startedLock.Unlock()

-	var fifo Queue
-	if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
-		fifo = NewRealFIFO(MetaNamespaceKeyFunc, s.indexer, s.transform)
-	} else {
-		fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
-			KnownObjects:          s.indexer,
-			EmitDeltaTypeReplaced: true,
-			Transformer:           s.transform,
-		})
-	}
+	fifo := newQueueFIFO(s.indexer, s.transform)

 	cfg := &Config{
 		Queue: fifo,
diff --git a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
index ef322bea850..b907410dc05 100644
--- a/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
@@ -61,7 +61,8 @@ type RealFIFO struct {
 }

 var (
-	_ = Queue(&RealFIFO{}) // RealFIFO is a Queue
+	_ = Queue(&RealFIFO{})             // RealFIFO is a Queue
+	_ = TransformingStore(&RealFIFO{}) // RealFIFO implements TransformingStore to allow memory optimizations
 )

 // Close the queue.
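
With TransformingStore now embedding ReflectorStore rather than the full Store, and both FIFOs asserting that they implement it, watchList above can copy a store's transform into its temporary store and into the consistency check. A minimal conforming store might look like the following sketch (transformedStore is a hypothetical wrapper, and it assumes a full cache.Store also satisfies the narrower ReflectorStore method set):

// Illustrative only: a store that exposes its TransformFunc so the reflector
// can reuse it, per the TransformingStore interface above.
package example

import "k8s.io/client-go/tools/cache"

type transformedStore struct {
	cache.Store                     // Add/Update/Delete/Replace/... via embedding
	transform   cache.TransformFunc // applied by the reflector's internal stores
}

// Transformer makes the wrapped transform visible to the reflector.
func (s *transformedStore) Transformer() cache.TransformFunc { return s.transform }

// Compile-time check against the vendored interface.
var _ cache.TransformingStore = (*transformedStore)(nil)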
diff --git a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
index 06f172d82bf..72c0124a0f1 100644
--- a/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
+++ b/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
@@ -45,16 +45,28 @@ func IsDataConsistencyDetectionForWatchListEnabled() bool {
 	return dataConsistencyDetectionForWatchListEnabled
 }

+// SetDataConsistencyDetectionForWatchListEnabledForTest allows to enable/disable data consistency detection for testing purposes.
+// It returns a function that restores the original value.
+func SetDataConsistencyDetectionForWatchListEnabledForTest(enabled bool) func() {
+	original := dataConsistencyDetectionForWatchListEnabled
+	dataConsistencyDetectionForWatchListEnabled = enabled
+	return func() {
+		dataConsistencyDetectionForWatchListEnabled = original
+	}
+}
+
 type RetrieveItemsFunc[U any] func() []U

 type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error)

+type TransformFunc func(interface{}) (interface{}, error)
+
 // CheckDataConsistency exists solely for testing purposes.
 // we cannot use checkWatchListDataConsistencyIfRequested because
 // it is guarded by an environmental variable.
 // we cannot manipulate the environmental variable because
 // it will affect other tests in this package.
-func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
+func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listItemTransformFunc TransformFunc, listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
 	if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) {
 		klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity)
 		return
@@ -84,6 +96,15 @@ func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity
 	if err != nil {
 		panic(err) // this should never happen
 	}
+	if listItemTransformFunc != nil {
+		for i := range rawListItems {
+			obj, err := listItemTransformFunc(rawListItems[i])
+			if err != nil {
+				panic(err)
+			}
+			rawListItems[i] = obj.(runtime.Object)
+		}
+	}
 	listItems := toMetaObjectSliceOrDie(rawListItems)
 	sort.Sort(byUID(listItems))
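
SetDataConsistencyDetectionForWatchListEnabledForTest above returns a restore closure so the package-level flag cannot leak between tests. Likely usage (the test name is illustrative, and the test body is elided):

// Illustrative only: enabling the check for a single test.
package example

import (
	"testing"

	"k8s.io/client-go/util/consistencydetector"
)

func TestWatchListConsistency(t *testing.T) {
	restore := consistencydetector.SetDataConsistencyDetectionForWatchListEnabledForTest(true)
	t.Cleanup(restore) // put the package-level flag back afterwards

	// ... drive a watch-list informer here; on mismatch the detector panics ...
}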
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d3156340e00..18e984207d3 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -169,8 +169,8 @@ github.com/google/uuid
 # github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
 ## explicit; go 1.20
 github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2
-## explicit; go 1.23.0
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3
+## explicit; go 1.24.0
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -242,15 +242,15 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
 # github.com/prometheus/client_model v0.6.2
 ## explicit; go 1.22.0
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.66.1
-## explicit; go 1.23.0
+# github.com/prometheus/common v0.67.4
+## explicit; go 1.24.0
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
 # github.com/prometheus/otlptranslator v1.0.0
 ## explicit; go 1.23.0
 github.com/prometheus/otlptranslator
-# github.com/prometheus/procfs v0.17.0
-## explicit; go 1.23.0
+# github.com/prometheus/procfs v0.19.2
+## explicit; go 1.24.0
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
@@ -286,25 +286,26 @@ github.com/wavesoftware/go-ensure
 # github.com/x448/float16 v0.8.4
 ## explicit; go 1.11
 github.com/x448/float16
-# go.opentelemetry.io/auto/sdk v1.1.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/auto/sdk v1.2.1
+## explicit; go 1.24.0
 go.opentelemetry.io/auto/sdk
 go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
+## explicit; go 1.24.0
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-# go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0
+## explicit; go 1.24.0
 go.opentelemetry.io/contrib/instrumentation/runtime
 go.opentelemetry.io/contrib/instrumentation/runtime/internal/deprecatedruntime
 go.opentelemetry.io/contrib/instrumentation/runtime/internal/x
-# go.opentelemetry.io/otel v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/attribute/internal/xxhash
 go.opentelemetry.io/otel/baggage
 go.opentelemetry.io/otel/codes
 go.opentelemetry.io/otel/internal/baggage
@@ -312,85 +313,98 @@ go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/internal
 go.opentelemetry.io/otel/semconv/v1.20.0
-go.opentelemetry.io/otel/semconv/v1.26.0
 go.opentelemetry.io/otel/semconv/v1.37.0
 go.opentelemetry.io/otel/semconv/v1.37.0/goconv
 go.opentelemetry.io/otel/semconv/v1.37.0/httpconv
 go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
 go.opentelemetry.io/otel/semconv/v1.4.0
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/counter
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/observ
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
-# go.opentelemetry.io/otel/exporters/prometheus v0.60.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/x
+# go.opentelemetry.io/otel/exporters/prometheus v0.61.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/exporters/prometheus/internal
+go.opentelemetry.io/otel/exporters/prometheus/internal/counter
+go.opentelemetry.io/otel/exporters/prometheus/internal/observ
+go.opentelemetry.io/otel/exporters/prometheus/internal/x
+# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/observ
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x
-# go.opentelemetry.io/otel/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/metric v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/sdk v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
-go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-go.opentelemetry.io/otel/sdk/trace/internal/x
+go.opentelemetry.io/otel/sdk/trace/internal/env
+go.opentelemetry.io/otel/sdk/trace/internal/observ
 go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/sdk/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/sdk/metric v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/sdk/metric
 go.opentelemetry.io/otel/sdk/metric/exemplar
 go.opentelemetry.io/otel/sdk/metric/internal
 go.opentelemetry.io/otel/sdk/metric/internal/aggregate
+go.opentelemetry.io/otel/sdk/metric/internal/observ
+go.opentelemetry.io/otel/sdk/metric/internal/reservoir
 go.opentelemetry.io/otel/sdk/metric/internal/x
 go.opentelemetry.io/otel/sdk/metric/metricdata
 go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest
-# go.opentelemetry.io/otel/trace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/trace v1.39.0
+## explicit; go 1.24.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/internal/telemetry
 go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.7.1
+# go.opentelemetry.io/proto/otlp v1.9.0
 ## explicit; go 1.23.0
 go.opentelemetry.io/proto/otlp/collector/metrics/v1
 go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -422,7 +436,7 @@ go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/internal/ztest
 go.uber.org/zap/zapcore
 go.uber.org/zap/zaptest
-# go.yaml.in/yaml/v2 v2.4.2
+# go.yaml.in/yaml/v2 v2.4.3
 ## explicit; go 1.15
 go.yaml.in/yaml/v2
 # go.yaml.in/yaml/v3 v3.0.4
@@ -449,15 +463,15 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.30.0
-## explicit; go 1.23.0
+# golang.org/x/oauth2 v0.32.0
+## explicit; go 1.24.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sync v0.18.0
 ## explicit; go 1.24.0
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.38.0
+# golang.org/x/sys v0.39.0
 ## explicit; go 1.24.0
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
@@ -510,15 +524,15 @@ golang.org/x/tools/internal/versions
 # gomodules.xyz/jsonpatch/v2 v2.5.0
 ## explicit; go 1.20
 gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217
+## explicit; go 1.24.0
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217
+## explicit; go 1.24.0
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.75.0
-## explicit; go 1.23.0
+# google.golang.org/grpc v1.77.0
+## explicit; go 1.24.0
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -528,7 +542,6 @@ google.golang.org/grpc/balancer/endpointsharding
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
 google.golang.org/grpc/balancer/pickfirst/internal
-google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -538,6 +551,7 @@ google.golang.org/grpc/credentials
 google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
+google.golang.org/grpc/encoding/internal
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
@@ -581,7 +595,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.8
+# google.golang.org/protobuf v1.36.10
 ## explicit; go 1.23
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
@@ -633,7 +647,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.34.2
+# k8s.io/api v0.34.3
 ## explicit; go 1.24.0
 k8s.io/api/admission/v1
 k8s.io/api/admissionregistration/v1
@@ -694,7 +708,7 @@ k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
 k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apiextensions-apiserver v0.34.2
+# k8s.io/apiextensions-apiserver v0.34.3
 ## explicit; go 1.24.0
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -717,7 +731,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio
 k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces
 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
-# k8s.io/apimachinery v0.34.2
+# k8s.io/apimachinery v0.34.3
 ## explicit; go 1.24.0
 k8s.io/apimachinery/pkg/api/apitesting
 k8s.io/apimachinery/pkg/api/apitesting/fuzzer
@@ -781,10 +795,10 @@ k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.34.2
+# k8s.io/apiserver v0.34.3
 ## explicit; go 1.24.0
 k8s.io/apiserver/pkg/storage/names
-# k8s.io/client-go v0.34.2
+# k8s.io/client-go v0.34.3
 ## explicit; go 1.24.0
 k8s.io/client-go/applyconfigurations
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
@@ -1120,7 +1134,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/code-generator v0.34.2
+# k8s.io/code-generator v0.34.3
 ## explicit; go 1.24.0
 k8s.io/code-generator
 k8s.io/code-generator/cmd/applyconfiguration-gen
@@ -1223,7 +1237,7 @@ knative.dev/hack/schema/commands
 knative.dev/hack/schema/docs
 knative.dev/hack/schema/registry
 knative.dev/hack/schema/schema
-# knative.dev/pkg v0.0.0-20251126013532-e853b1d1d6bb
+# knative.dev/pkg v0.0.0-20251216131424-9cc841088d57
 ## explicit; go 1.24.0
 knative.dev/pkg/apiextensions/storageversion
 knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1379,7 +1393,7 @@ knative.dev/pkg/webhook/resourcesemantics
 knative.dev/pkg/webhook/resourcesemantics/conversion
 knative.dev/pkg/webhook/resourcesemantics/defaulting
 knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/reconciler-test v0.0.0-20251126022416-3fd6f8701d7b
+# knative.dev/reconciler-test v0.0.0-20251203013610-65cec2a81a56
 ## explicit; go 1.24.0
 knative.dev/reconciler-test/cmd/eventshub
 knative.dev/reconciler-test/pkg/environment