diff --git a/go.mod b/go.mod index e2575c9e..cc267cac 100644 --- a/go.mod +++ b/go.mod @@ -23,11 +23,11 @@ require ( github.com/stretchr/testify v1.11.1 github.com/zachfi/zkit v0.1.2 github.com/zachfi/znet v0.32.16 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 - go.opentelemetry.io/otel v1.38.0 - go.opentelemetry.io/otel/trace v1.38.0 - google.golang.org/grpc v1.75.1 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 + go.opentelemetry.io/otel v1.39.0 + go.opentelemetry.io/otel/trace v1.39.0 + google.golang.org/grpc v1.77.0 google.golang.org/protobuf v1.36.10 gopkg.in/yaml.v2 v2.4.0 k8s.io/apimachinery v0.34.0 @@ -160,10 +160,10 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 // indirect go.opentelemetry.io/otel/log v0.12.2 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect go.opentelemetry.io/otel/sdk/log v0.12.2 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect go.opentelemetry.io/proto/otlp v1.6.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect @@ -171,19 +171,19 @@ require ( go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.43.0 // indirect + golang.org/x/crypto v0.44.0 // indirect golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.46.0 // indirect - golang.org/x/oauth2 v0.31.0 // indirect - golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.10.0 // indirect golang.org/x/tools v0.38.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 6f1e53aa..838dbacc 100644 --- a/go.sum +++ b/go.sum @@ -417,19 +417,19 @@ go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+ go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 
h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 h1:RN3ifU8y4prNWeEnQp2kRRHz8UwonAEYZl8tUzHEXAk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0/go.mod h1:habDz3tEWiFANTo6oUE99EmaFUrCNYAAg3wiVmusm70= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 h1:0tY123n7CdWMem7MOVdKOt0YfshufLCwfE5Bob+hQuM= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0/go.mod h1:CosX/aS4eHnG9D7nESYpV753l4j9q5j3SL/PUYd2lR8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 h1:UIrZgRBHUrYRlJ4V419lVb4rs2ar0wFzKNAebaP05XU= go.opentelemetry.io/contrib/propagators/jaeger v1.35.0/go.mod h1:0ciyFyYZxE6JqRAQvIgGRabKWDUmNdW3GAQb6y/RlFU= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0 h1:bQ1Gvah4Sp8z7epSkgJaNTuZm7sutfA6Fji2/7cKFMc= go.opentelemetry.io/contrib/samplers/jaegerremote v0.30.0/go.mod h1:9b8Q9rH52NgYH3ShiTFB5wf18Vt3RTH/VMB7LDcC1ug= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= @@ -457,20 +457,20 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/ go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod 
h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc h1:uqxdywfHqqCl6LmZzI3pUnXT1RGFYyUgxj0AkWPFxi0= go.opentelemetry.io/otel/sdk/log/logtest v0.0.0-20250521073539-a85ae98dcedc/go.mod h1:TY/N/FT7dmFrP/r5ym3g0yysP1DefqGpAZr4f82P0dE= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -491,8 +491,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -509,20 +509,20 @@ golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= -golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -537,15 +537,15 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -570,14 +570,14 @@ google.golang.org/appengine 
v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= -google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 296407f3..2dc8eaea 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -4,13 +4,14 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( - "google.golang.org/grpc/stats" + "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/stats" ) // ScopeName is the instrumentation scope name. @@ -39,6 +40,9 @@ type config struct { SpanAttributes []attribute.KeyValue MetricAttributes []attribute.KeyValue + PublicEndpoint bool + PublicEndpointFn func(ctx context.Context, info *stats.RPCTagInfo) bool + ReceivedEvent bool SentEvent bool } @@ -48,6 +52,12 @@ type Option interface { apply(*config) } +type optionFunc func(*config) + +func (f optionFunc) apply(c *config) { + f(c) +} + // newConfig returns a config configured with all the passed Options. func newConfig(opts []Option) *config { c := &config{ @@ -61,78 +71,70 @@ func newConfig(opts []Option) *config { return c } -type propagatorsOption struct{ p propagation.TextMapPropagator } +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} -func (o propagatorsOption) apply(c *config) { - if o.p != nil { - c.Propagators = o.p - } +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(context.Context, *stats.RPCTagInfo) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) } // WithPropagators returns an Option to use the Propagators when extracting // and injecting trace context from requests. 
func WithPropagators(p propagation.TextMapPropagator) Option { - return propagatorsOption{p: p} -} - -type tracerProviderOption struct{ tp trace.TracerProvider } - -func (o tracerProviderOption) apply(c *config) { - if o.tp != nil { - c.TracerProvider = o.tp - } + return optionFunc(func(c *config) { + if p != nil { + c.Propagators = p + } + }) } // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. func WithInterceptorFilter(f InterceptorFilter) Option { - return interceptorFilterOption{f: f} -} - -type interceptorFilterOption struct { - f InterceptorFilter -} - -func (o interceptorFilterOption) apply(c *config) { - if o.f != nil { - c.InterceptorFilter = o.f - } + return optionFunc(func(c *config) { + if f != nil { + c.InterceptorFilter = f + } + }) } // WithFilter returns an Option to use the request filter. func WithFilter(f Filter) Option { - return filterOption{f: f} -} - -type filterOption struct { - f Filter -} - -func (o filterOption) apply(c *config) { - if o.f != nil { - c.Filter = o.f - } + return optionFunc(func(c *config) { + if f != nil { + c.Filter = f + } + }) } // WithTracerProvider returns an Option to use the TracerProvider when // creating a Tracer. func WithTracerProvider(tp trace.TracerProvider) Option { - return tracerProviderOption{tp: tp} -} - -type meterProviderOption struct{ mp metric.MeterProvider } - -func (o meterProviderOption) apply(c *config) { - if o.mp != nil { - c.MeterProvider = o.mp - } + return optionFunc(func(c *config) { + c.TracerProvider = tp + }) } // WithMeterProvider returns an Option to use the MeterProvider when // creating a Meter. If this option is not provide the global MeterProvider will be used. func WithMeterProvider(mp metric.MeterProvider) Option { - return meterProviderOption{mp: mp} + return optionFunc(func(c *config) { + c.MeterProvider = mp + }) } // Event type that can be recorded, see WithMessageEvents. @@ -144,21 +146,6 @@ const ( SentEvents ) -type messageEventsProviderOption struct { - events []Event -} - -func (m messageEventsProviderOption) apply(c *config) { - for _, e := range m.events { - switch e { - case ReceivedEvents: - c.ReceivedEvent = true - case SentEvents: - c.SentEvent = true - } - } -} - // WithMessageEvents configures the Handler to record the specified events // (span.AddEvent) on spans. By default only summary attributes are added at the // end of the request. @@ -167,43 +154,42 @@ func (m messageEventsProviderOption) apply(c *config) { // - ReceivedEvents: Record the number of bytes read after every gRPC read operation. // - SentEvents: Record the number of bytes written after every gRPC write operation. func WithMessageEvents(events ...Event) Option { - return messageEventsProviderOption{events: events} -} - -type spanStartOption struct{ opts []trace.SpanStartOption } - -func (o spanStartOption) apply(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, o.opts...) + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReceivedEvents: + c.ReceivedEvent = true + case SentEvents: + c.SentEvent = true + } + } + }) } // WithSpanOptions configures an additional set of // trace.SpanOptions, which are applied to each new span. +// +// Deprecated: It is only used by the deprecated interceptor, and is unused by [NewClientHandler] and [NewServerHandler]. 
func WithSpanOptions(opts ...trace.SpanStartOption) Option { - return spanStartOption{opts} -} - -type spanAttributesOption struct{ a []attribute.KeyValue } - -func (o spanAttributesOption) apply(c *config) { - if o.a != nil { - c.SpanAttributes = o.a - } + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) } // WithSpanAttributes returns an Option to add custom attributes to the spans. func WithSpanAttributes(a ...attribute.KeyValue) Option { - return spanAttributesOption{a: a} -} - -type metricAttributesOption struct{ a []attribute.KeyValue } - -func (o metricAttributesOption) apply(c *config) { - if o.a != nil { - c.MetricAttributes = o.a - } + return optionFunc(func(c *config) { + if a != nil { + c.SpanAttributes = a + } + }) } // WithMetricAttributes returns an Option to add custom attributes to the metrics. func WithMetricAttributes(a ...attribute.KeyValue) Option { - return metricAttributesOption{a: a} + return optionFunc(func(c *config) { + if a != nil { + c.MetricAttributes = append(c.MetricAttributes, a...) + } + }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index f63513d4..99f88ec3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -4,318 +4,18 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // gRPC tracing middleware -// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +// https://opentelemetry.io/docs/specs/semconv/rpc/ import ( - "context" - "errors" - "io" "net" "strconv" - "google.golang.org/grpc" - grpc_codes "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.30.0" - "go.opentelemetry.io/otel/trace" -) - -type messageType attribute.KeyValue - -// Event adds an event of the messageType to the span associated with the -// passed context with a message id. -func (m messageType) Event(ctx context.Context, id int, _ interface{}) { - span := trace.SpanFromContext(ctx) - if !span.IsRecording() { - return - } - span.AddEvent("message", trace.WithAttributes( - attribute.KeyValue(m), - semconv.RPCMessageIDKey.Int(id), - )) -} - -var ( - messageSent = messageType(semconv.RPCMessageTypeSent) - messageReceived = messageType(semconv.RPCMessageTypeReceived) + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and -// SendMsg method call. 
-type clientStream struct { - grpc.ClientStream - desc *grpc.StreamDesc - - span trace.Span - - receivedEvent bool - sentEvent bool - - receivedMessageID int - sentMessageID int -} - -var _ = proto.Marshal - -func (w *clientStream) RecvMsg(m interface{}) error { - err := w.ClientStream.RecvMsg(m) - - if err == nil && !w.desc.ServerStreams { - w.endSpan(nil) - } else if errors.Is(err, io.EOF) { - w.endSpan(nil) - } else if err != nil { - w.endSpan(err) - } else { - w.receivedMessageID++ - - if w.receivedEvent { - messageReceived.Event(w.Context(), w.receivedMessageID, m) - } - } - - return err -} - -func (w *clientStream) SendMsg(m interface{}) error { - err := w.ClientStream.SendMsg(m) - - w.sentMessageID++ - - if w.sentEvent { - messageSent.Event(w.Context(), w.sentMessageID, m) - } - - if err != nil { - w.endSpan(err) - } - - return err -} - -func (w *clientStream) Header() (metadata.MD, error) { - md, err := w.ClientStream.Header() - if err != nil { - w.endSpan(err) - } - - return md, err -} - -func (w *clientStream) CloseSend() error { - err := w.ClientStream.CloseSend() - if err != nil { - w.endSpan(err) - } - - return err -} - -func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { - return &clientStream{ - ClientStream: s, - span: span, - desc: desc, - receivedEvent: cfg.ReceivedEvent, - sentEvent: cfg.SentEvent, - } -} - -func (w *clientStream) endSpan(err error) { - if err != nil { - s, _ := status.FromError(err) - w.span.SetStatus(codes.Error, s.Message()) - w.span.SetAttributes(statusCodeAttr(s.Code())) - } else { - w.span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - w.span.End() -} - -// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.NewClient call. -// -// Deprecated: Use [NewClientHandler] instead. -func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { - cfg := newConfig(opts) - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - callOpts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - i := &InterceptorInfo{ - Method: method, - Type: StreamClient, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return streamer(ctx, desc, cc, method, callOpts...) - } - - name, attr := telemetryAttributes(method, cc.Target()) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - ctx, - name, - startOpts..., - ) - - ctx = inject(ctx, cfg.Propagators) - - s, err := streamer(ctx, desc, cc, method, callOpts...) - if err != nil { - grpcStatus, _ := status.FromError(err) - span.SetStatus(codes.Error, grpcStatus.Message()) - span.SetAttributes(statusCodeAttr(grpcStatus.Code())) - span.End() - return s, err - } - stream := wrapClientStream(s, desc, span, cfg) - return stream, nil - } -} - -// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and -// SendMsg method call. 
-type serverStream struct { - grpc.ServerStream - ctx context.Context - - receivedMessageID int - sentMessageID int - - receivedEvent bool - sentEvent bool -} - -func (w *serverStream) Context() context.Context { - return w.ctx -} - -func (w *serverStream) RecvMsg(m interface{}) error { - err := w.ServerStream.RecvMsg(m) - - if err == nil { - w.receivedMessageID++ - if w.receivedEvent { - messageReceived.Event(w.Context(), w.receivedMessageID, m) - } - } - - return err -} - -func (w *serverStream) SendMsg(m interface{}) error { - err := w.ServerStream.SendMsg(m) - - w.sentMessageID++ - if w.sentEvent { - messageSent.Event(w.Context(), w.sentMessageID, m) - } - - return err -} - -func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream { - return &serverStream{ - ServerStream: ss, - ctx: ctx, - receivedEvent: cfg.ReceivedEvent, - sentEvent: cfg.SentEvent, - } -} - -// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// Deprecated: Use [NewServerHandler] instead. -func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - cfg := newConfig(opts) - tracer := cfg.TracerProvider.Tracer( - ScopeName, - trace.WithInstrumentationVersion(Version()), - ) - - return func( - srv interface{}, - ss grpc.ServerStream, - info *grpc.StreamServerInfo, - handler grpc.StreamHandler, - ) error { - ctx := ss.Context() - i := &InterceptorInfo{ - StreamServerInfo: info, - Type: StreamServer, - } - if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { - return handler(srv, wrapServerStream(ctx, ss, cfg)) - } - - ctx = extract(ctx, cfg.Propagators) - name, attr := telemetryAttributes(info.FullMethod, peerFromCtx(ctx)) - - startOpts := append([]trace.SpanStartOption{ - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attr...), - }, - cfg.SpanStartOptions..., - ) - - ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), - name, - startOpts..., - ) - defer span.End() - - err := handler(srv, wrapServerStream(ctx, ss, cfg)) - if err != nil { - s, _ := status.FromError(err) - statusCode, msg := serverStatus(s) - span.SetStatus(statusCode, msg) - span.SetAttributes(statusCodeAttr(s.Code())) - } else { - span.SetAttributes(statusCodeAttr(grpc_codes.OK)) - } - - return err - } -} - -// telemetryAttributes returns a span name and span and metric attributes from -// the gRPC method and peer address. -func telemetryAttributes(fullMethod, sererAddr string) (string, []attribute.KeyValue) { - name, methodAttrs := internal.ParseFullMethod(fullMethod) - srvAttrs := serverAddrAttrs(sererAddr) - - attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(srvAttrs)) - attrs = append(attrs, semconv.RPCSystemGRPC) - attrs = append(attrs, methodAttrs...) - attrs = append(attrs, srvAttrs...) - return name, attrs -} - // serverAddrAttrs returns the server address attributes for the hostport. func serverAddrAttrs(hostport string) []attribute.KeyValue { h, pStr, err := net.SplitHostPort(hostport) @@ -333,20 +33,6 @@ func serverAddrAttrs(hostport string) []attribute.KeyValue { } } -// peerFromCtx returns a peer address from a context, if one exists. -func peerFromCtx(ctx context.Context) string { - p, ok := peer.FromContext(ctx) - if !ok { - return "" - } - return p.Addr.String() -} - -// statusCodeAttr returns status code attribute based on given gRPC code. 
-func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { - return semconv.RPCGRPCStatusCodeKey.Int64(int64(c)) -} - // serverStatus returns a span status code and message for a given gRPC // status code. It maps specific gRPC status codes to a corresponding span // status code and message. This function is intended for use on the server diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index 1fa73c2f..e46185e0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.30.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // ParseFullMethod returns a span name following the OpenTelemetry semantic diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index 6e67f021..4c62341d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -6,21 +6,18 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g import ( "context" - "google.golang.org/grpc/metadata" - - "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/metadata" ) type metadataSupplier struct { - metadata *metadata.MD + metadata metadata.MD } // assert that metadataSupplier implements the TextMapCarrier interface. -var _ propagation.TextMapCarrier = &metadataSupplier{} +var _ propagation.TextMapCarrier = metadataSupplier{} -func (s *metadataSupplier) Get(key string) string { +func (s metadataSupplier) Get(key string) string { values := s.metadata.Get(key) if len(values) == 0 { return "" @@ -28,51 +25,27 @@ func (s *metadataSupplier) Get(key string) string { return values[0] } -func (s *metadataSupplier) Set(key string, value string) { +func (s metadataSupplier) Set(key, value string) { s.metadata.Set(key, value) } -func (s *metadataSupplier) Keys() []string { - out := make([]string, 0, len(*s.metadata)) - for key := range *s.metadata { +func (s metadataSupplier) Keys() []string { + out := make([]string, 0, len(s.metadata)) + for key := range s.metadata { out = append(out, key) } return out } -// Inject injects correlation context and span context into the gRPC -// metadata object. This function is meant to be used on outgoing -// requests. -// Deprecated: Unnecessary public func. 
-func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { - c := newConfig(opts) - c.Propagators.Inject(ctx, &metadataSupplier{ - metadata: md, - }) -} - func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { md, ok := metadata.FromOutgoingContext(ctx) if !ok { md = metadata.MD{} } - propagators.Inject(ctx, &metadataSupplier{ - metadata: &md, - }) - return metadata.NewOutgoingContext(ctx, md) -} - -// Extract returns the correlation context and span context that -// another service encoded in the gRPC metadata object with Inject. -// This function is meant to be used on incoming requests. -// Deprecated: Unnecessary public func. -func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { - c := newConfig(opts) - ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + propagators.Inject(ctx, metadataSupplier{ metadata: md, }) - - return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) + return metadata.NewOutgoingContext(ctx, md) } func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { @@ -81,7 +54,7 @@ func extract(ctx context.Context, propagators propagation.TextMapPropagator) con md = metadata.MD{} } - return propagators.Extract(ctx, &metadataSupplier{ - metadata: &md, + return propagators.Extract(ctx, metadataSupplier{ + metadata: md, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 9bec51df..278f6d0d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -8,18 +8,17 @@ import ( "sync/atomic" "time" - grpc_codes "google.golang.org/grpc/codes" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - semconv "go.opentelemetry.io/otel/semconv/v1.30.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv" "go.opentelemetry.io/otel/trace" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) @@ -27,10 +26,11 @@ import ( type gRPCContextKey struct{} type gRPCContext struct { - inMessages int64 - outMessages int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + metricAttrSet attribute.Set + record bool } type serverHandler struct { @@ -38,11 +38,11 @@ type serverHandler struct { tracer trace.Tracer - duration metric.Float64Histogram - inSize metric.Int64Histogram - outSize metric.Int64Histogram - inMsg metric.Int64Histogram - outMsg metric.Int64Histogram + duration rpcconv.ServerDuration + inSize int64Hist + outSize int64Hist + inMsg rpcconv.ServerRequestsPerRPC + outMsg rpcconv.ServerResponsesPerRPC } // NewServerHandler creates a stats.Handler for a gRPC server. 
@@ -62,76 +62,41 @@ func NewServerHandler(opts ...Option) stats.Handler { ) var err error - h.duration, err = meter.Float64Histogram( - semconv.RPCServerDurationName, - metric.WithDescription(semconv.RPCServerDurationDescription), - metric.WithUnit(semconv.RPCServerDurationUnit), - ) + h.duration, err = rpcconv.NewServerDuration(meter) if err != nil { otel.Handle(err) - if h.duration == nil { - h.duration = noop.Float64Histogram{} - } } - h.inSize, err = meter.Int64Histogram( - semconv.RPCServerRequestSizeName, - metric.WithDescription(semconv.RPCServerRequestSizeDescription), - metric.WithUnit(semconv.RPCServerRequestSizeUnit), - ) + h.inSize, err = rpcconv.NewServerRequestSize(meter) if err != nil { otel.Handle(err) - if h.inSize == nil { - h.inSize = noop.Int64Histogram{} - } } - h.outSize, err = meter.Int64Histogram( - semconv.RPCServerResponseSizeName, - metric.WithDescription(semconv.RPCServerResponseSizeDescription), - metric.WithUnit(semconv.RPCServerResponseSizeUnit), - ) + h.outSize, err = rpcconv.NewServerResponseSize(meter) if err != nil { otel.Handle(err) - if h.outSize == nil { - h.outSize = noop.Int64Histogram{} - } } - h.inMsg, err = meter.Int64Histogram( - semconv.RPCServerRequestsPerRPCName, - metric.WithDescription(semconv.RPCServerRequestsPerRPCDescription), - metric.WithUnit(semconv.RPCServerRequestsPerRPCUnit), - ) + h.inMsg, err = rpcconv.NewServerRequestsPerRPC(meter) if err != nil { otel.Handle(err) - if h.inMsg == nil { - h.inMsg = noop.Int64Histogram{} - } } - h.outMsg, err = meter.Int64Histogram( - semconv.RPCServerResponsesPerRPCName, - metric.WithDescription(semconv.RPCServerResponsesPerRPCDescription), - metric.WithUnit(semconv.RPCServerResponsesPerRPCUnit), - ) + h.outMsg, err = rpcconv.NewServerResponsesPerRPC(meter) if err != nil { otel.Handle(err) - if h.outMsg == nil { - h.outMsg = noop.Int64Histogram{} - } } return h } // TagConn can attach some information to the given context. -func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { +func (*serverHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx } // HandleConn processes the Conn stats. -func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) { +func (*serverHandler) HandleConn(context.Context, stats.ConnStats) { } // TagRPC can attach some information to the given context. @@ -147,11 +112,24 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont } if record { + // Make a new slice to avoid aliasing into the same attrs slice used by metrics. + spanAttributes := make([]attribute.KeyValue, 0, len(attrs)+len(h.SpanAttributes)) + spanAttributes = append(append(spanAttributes, attrs...), h.SpanAttributes...) + opts := []trace.SpanStartOption{ + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(spanAttributes...), + } + if h.PublicEndpoint || (h.PublicEndpointFn != nil && h.PublicEndpointFn(ctx, info)) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. 
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } ctx, _ = h.tracer.Start( trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + opts..., ) } @@ -159,13 +137,23 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont metricAttrs: append(attrs, h.MetricAttributes...), record: record, } + gctx.metricAttrSet = attribute.NewSet(gctx.metricAttrs...) return context.WithValue(ctx, gRPCContextKey{}, &gctx) } // HandleRPC processes the RPC stats. func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - h.handleRPC(ctx, rs, h.duration, h.inSize, h.outSize, h.inMsg, h.outMsg, serverStatus) + h.handleRPC( + ctx, + rs, + h.duration.Inst(), + h.inSize, + h.outSize, + h.inMsg.Inst(), + h.outMsg.Inst(), + serverStatus, + ) } type clientHandler struct { @@ -173,11 +161,11 @@ type clientHandler struct { tracer trace.Tracer - duration metric.Float64Histogram - inSize metric.Int64Histogram - outSize metric.Int64Histogram - inMsg metric.Int64Histogram - outMsg metric.Int64Histogram + duration rpcconv.ClientDuration + inSize int64Hist + outSize int64Hist + inMsg rpcconv.ClientResponsesPerRPC + outMsg rpcconv.ClientRequestsPerRPC } // NewClientHandler creates a stats.Handler for a gRPC client. @@ -197,64 +185,29 @@ func NewClientHandler(opts ...Option) stats.Handler { ) var err error - h.duration, err = meter.Float64Histogram( - semconv.RPCClientDurationName, - metric.WithDescription(semconv.RPCClientDurationDescription), - metric.WithUnit(semconv.RPCClientDurationUnit), - ) + h.duration, err = rpcconv.NewClientDuration(meter) if err != nil { otel.Handle(err) - if h.duration == nil { - h.duration = noop.Float64Histogram{} - } } - h.outSize, err = meter.Int64Histogram( - semconv.RPCClientRequestSizeName, - metric.WithDescription(semconv.RPCClientRequestSizeDescription), - metric.WithUnit(semconv.RPCClientRequestSizeUnit), - ) + h.inSize, err = rpcconv.NewClientResponseSize(meter) if err != nil { otel.Handle(err) - if h.outSize == nil { - h.outSize = noop.Int64Histogram{} - } } - h.inSize, err = meter.Int64Histogram( - semconv.RPCClientResponseSizeName, - metric.WithDescription(semconv.RPCClientResponseSizeDescription), - metric.WithUnit(semconv.RPCClientResponseSizeUnit), - ) + h.outSize, err = rpcconv.NewClientRequestSize(meter) if err != nil { otel.Handle(err) - if h.inSize == nil { - h.inSize = noop.Int64Histogram{} - } } - h.outMsg, err = meter.Int64Histogram( - semconv.RPCClientRequestsPerRPCName, - metric.WithDescription(semconv.RPCClientRequestsPerRPCDescription), - metric.WithUnit(semconv.RPCClientRequestsPerRPCUnit), - ) + h.inMsg, err = rpcconv.NewClientResponsesPerRPC(meter) if err != nil { otel.Handle(err) - if h.outMsg == nil { - h.outMsg = noop.Int64Histogram{} - } } - h.inMsg, err = meter.Int64Histogram( - semconv.RPCClientResponsesPerRPCName, - metric.WithDescription(semconv.RPCClientResponsesPerRPCDescription), - metric.WithUnit(semconv.RPCClientResponsesPerRPCUnit), - ) + h.outMsg, err = rpcconv.NewClientRequestsPerRPC(meter) if err != nil { otel.Handle(err) - if h.inMsg == nil { - h.inMsg = noop.Int64Histogram{} - } } return h @@ -271,11 +224,14 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont } if record { + // Make a new slice to avoid aliasing into the same attrs slice used by 
metrics. + spanAttributes := make([]attribute.KeyValue, 0, len(attrs)+len(h.SpanAttributes)) + spanAttributes = append(append(spanAttributes, attrs...), h.SpanAttributes...) ctx, _ = h.tracer.Start( ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(append(attrs, h.SpanAttributes...)...), + trace.WithAttributes(spanAttributes...), ) } @@ -283,6 +239,7 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont metricAttrs: append(attrs, h.MetricAttributes...), record: record, } + gctx.metricAttrSet = attribute.NewSet(gctx.metricAttrs...) return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.Propagators) } @@ -290,7 +247,13 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont // HandleRPC processes the RPC stats. func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { h.handleRPC( - ctx, rs, h.duration, h.inSize, h.outSize, h.inMsg, h.outMsg, + ctx, + rs, + h.duration.Inst(), + h.inSize, + h.outSize, + h.inMsg.Inst(), + h.outMsg.Inst(), func(s *status.Status) (codes.Code, string) { return codes.Error, s.Message() }, @@ -298,20 +261,25 @@ func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { } // TagConn can attach some information to the given context. -func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { +func (*clientHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx } // HandleConn processes the Conn stats. -func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { +func (*clientHandler) HandleConn(context.Context, stats.ConnStats) { // no-op } +type int64Hist interface { + RecordSet(context.Context, int64, attribute.Set) +} + func (c *config) handleRPC( ctx context.Context, rs stats.RPCStats, duration metric.Float64Histogram, - inSize, outSize, inMsg, outMsg metric.Int64Histogram, + inSize, outSize int64Hist, + inMsg, outMsg metric.Int64Histogram, recordStatus func(*status.Status) (codes.Code, string), ) { gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) @@ -327,7 +295,7 @@ func (c *config) handleRPC( case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.inMessages, 1) - inSize.Record(ctx, int64(rs.Length), metric.WithAttributes(gctx.metricAttrs...)) + inSize.RecordSet(ctx, int64(rs.Length), gctx.metricAttrSet) } if c.ReceivedEvent && span.IsRecording() { @@ -343,7 +311,7 @@ func (c *config) handleRPC( case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.outMessages, 1) - outSize.Record(ctx, int64(rs.Length), metric.WithAttributes(gctx.metricAttrs...)) + outSize.RecordSet(ctx, int64(rs.Length), gctx.metricAttrSet) } if c.SentEvent && span.IsRecording() { @@ -384,6 +352,9 @@ func (c *config) handleRPC( var metricAttrs []attribute.KeyValue if gctx != nil { + // Don't use gctx.metricAttrSet here, because it requires passing + // multiple RecordOptions, which would call metric.mergeSets and + // allocate a new set for each Record call. metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) 
} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index b1feeca4..98f148be 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,6 +5,6 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.61.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index b25641c5..e980ab62 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -14,11 +14,17 @@ import ( // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. // Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. 
+// +// Deprecated: [DefaultClient] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. +// +// Deprecated: [Get] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, http.NoBody) if err != nil { return nil, err } @@ -26,8 +32,11 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) } // Head is a convenient replacement for http.Head that adds a span around the request. +// +// Deprecated: [Head] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, http.NoBody) if err != nil { return nil, err } @@ -35,6 +44,9 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error } // Post is a convenient replacement for http.Post that adds a span around the request. +// +// Deprecated: [Post] will be removed in a future release. +// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { @@ -45,6 +57,9 @@ func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (r } // PostForm is a convenient replacement for http.PostForm that adds a span around the request. +// +// Deprecated: [PostForm] will be removed in a future release. 
+// Create your own [http.Client] based on the [Transport] example: https://pkg.go.dev/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp#example-NewTransport func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index 6bd50d4c..c3be7861 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,9 +8,8 @@ import ( "net/http" "net/http/httptrace" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -27,7 +26,6 @@ type config struct { Meter metric.Meter Propagators propagation.TextMapPropagator SpanStartOptions []trace.SpanStartOption - PublicEndpoint bool PublicEndpointFn func(*http.Request) bool ReadEvent bool WriteEvent bool @@ -97,17 +95,19 @@ func WithMeterProvider(provider metric.MeterProvider) Option { // WithPublicEndpoint configures the Handler to link the span with an incoming // span context. If this option is not provided, then the association is a child // association instead of a link. +// +// Deprecated: Use [WithPublicEndpointFn] instead. +// To migrate, replace WithPublicEndpoint() with: +// +// WithPublicEndpointFn(func(*http.Request) bool { return true }) func WithPublicEndpoint() Option { - return optionFunc(func(c *config) { - c.PublicEndpoint = true - }) + return WithPublicEndpointFn(func(*http.Request) bool { return true }) } // WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. -// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. func WithPublicEndpointFn(fn func(*http.Request) bool) Option { return optionFunc(func(c *config) { c.PublicEndpointFn = fn @@ -144,11 +144,13 @@ func WithFilter(f Filter) Option { }) } -type event int +// Event represents message event types for [WithMessageEvents]. +type Event int // Different types of events that can be recorded, see WithMessageEvents. 
const ( - ReadEvents event = iota + unspecifiedEvents Event = iota + ReadEvents WriteEvents ) @@ -161,7 +163,7 @@ const ( // using the ReadBytesKey // - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write // using the WriteBytesKey -func WithMessageEvents(events ...event) Option { +func WithMessageEvents(events ...Event) Option { return optionFunc(func(c *config) { for _, e := range events { switch e { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 56b24b98..1c9aa3ff 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -2,6 +2,5 @@ // SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended -// to be used to add tracing by wrapping existing handlers (with Handler) and -// routes WithRouteTag. +// to be used to add tracing by wrapping existing handlers. package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 937f9b4e..c1bbf3a3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -8,13 +8,13 @@ import ( "time" "github.com/felixge/httpsnoop" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // middleware is an http middleware which wraps the next handler in a span. @@ -29,7 +29,6 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - publicEndpoint bool publicEndpointFn func(*http.Request) bool metricAttributesFn func(*http.Request) []attribute.KeyValue @@ -77,7 +76,6 @@ func (h *middleware) configure(c *config) { h.writeEvent = c.WriteEvent h.filters = c.Filters h.spanNameFormatter = c.SpanNameFormatter - h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) @@ -102,7 +100,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } opts = append(opts, h.spanStartOptions...) - if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + if h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx)) { opts = append(opts, trace.WithNewRoot()) // Linking incoming span context if any for public endpoint. if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { @@ -224,6 +222,9 @@ func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.Ke // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. +// +// Deprecated: spans are automatically annotated with the route attribute. 
+// To annotate metrics, use the [WithMetricAttributesFn] option. func WithRouteTag(route string, h http.Handler) http.Handler { attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go new file mode 100644 index 00000000..45d3d934 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/client.go @@ -0,0 +1,305 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/client.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "reflect" + "slices" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" +) + +type HTTPClient struct{ + requestBodySize httpconv.ClientRequestBodySize + requestDuration httpconv.ClientRequestDuration +} + +func NewHTTPClient(meter metric.Meter) HTTPClient { + client := HTTPClient{} + + var err error + client.requestBodySize, err = httpconv.NewClientRequestBodySize(meter) + handleErr(err) + + client.requestDuration, err = httpconv.NewClientRequestDuration( + meter, + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), + ) + handleErr(err) + + return client +} + +func (n HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. 
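Taken together, the config.go and handler.go changes above reshape how a server handler is configured: WithPublicEndpoint now delegates to WithPublicEndpointFn, WithMessageEvents accepts the exported Event type, and route annotation for metrics moves from the deprecated WithRouteTag to the WithMetricAttributesFn option. A hedged sketch of a handler wired against that surface; the operation name, attribute key, and reliance on the Go 1.23+ r.Pattern field are our illustrative choices, not mandated by the package:

// Sketch: configure the otelhttp server middleware with the updated options.
package example

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func newServer(inner http.Handler) http.Handler {
	return otelhttp.NewHandler(inner, "server",
		// Replaces the deprecated WithPublicEndpoint().
		otelhttp.WithPublicEndpointFn(func(*http.Request) bool { return true }),
		// ReadEvents and WriteEvents are now values of the exported Event type.
		otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
		// WithRouteTag is deprecated; annotate metrics with the route here.
		// r.Pattern is the net/http ServeMux pattern (Go 1.23+); the key is an
		// assumption matching the HTTP route semantic convention.
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{attribute.String("http.route", r.Pattern)}
		}),
	)
}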
+ + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconv.URLFull(u)) + + attrs = append(attrs, semconv.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconv.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconv.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconv.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n HTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconv.ErrorTypeOther + } + + return semconv.ErrorTypeKey.String(value) +} + +func (n HTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconv.ServerAddress(requestHost), + n.scheme(req), + ) + + if port > 0 { + attributes = append(attributes, semconv.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (n HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + + attributes := n.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + + return opts +} + +func (n HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { + n.requestBodySize.Inst().Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + n.requestDuration.Inst().Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) +} + +// TraceAttributes returns attributes for httptrace. 
+func (n HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconv.ServerAddress(host), + } +} + +func (n HTTPClient) scheme(req *http.Request) attribute.KeyValue { + if req.URL != nil && req.URL.Scheme != "" { + return semconv.URLScheme(req.URL.Scheme) + } + if req.TLS != nil { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go deleted file mode 100644 index 7cb9693d..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ /dev/null @@ -1,323 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/env.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" -) - -// OTelSemConvStabilityOptIn is an environment variable. -// That can be set to "http/dup" to keep getting the old HTTP semantic conventions. -const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" - -type ResponseTelemetry struct { - StatusCode int - ReadBytes int64 - ReadError error - WriteBytes int64 - WriteError error -} - -type HTTPServer struct { - duplicate bool - - // Old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram - - // New metrics - requestBodySizeHistogram metric.Int64Histogram - responseBodySizeHistogram metric.Int64Histogram - requestDurationHistogram metric.Float64Histogram -} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. 
-func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) - if s.duplicate { - return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) - } - return attrs -} - -func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { - if s.duplicate { - return []attribute.KeyValue{ - OldHTTPServer{}.NetworkTransportAttr(network), - CurrentHTTPServer{}.NetworkTransportAttr(network), - } - } - return []attribute.KeyValue{ - CurrentHTTPServer{}.NetworkTransportAttr(network), - } -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) - if s.duplicate { - return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) - } - return attrs -} - -// Route returns the attribute for the route. -func (s HTTPServer) Route(route string) attribute.KeyValue { - return CurrentHTTPServer{}.Route(route) -} - -// Status returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (s HTTPServer) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} - -type ServerMetricData struct { - ServerName string - ResponseSize int64 - - MetricData - MetricAttributes -} - -type MetricAttributes struct { - Req *http.Request - StatusCode int - AdditionalAttributes []attribute.KeyValue -} - -type MetricData struct { - RequestSize int64 - - // The request duration, in milliseconds - ElapsedTime float64 -} - -var ( - metricAddOptionPool = &sync.Pool{ - New: func() interface{} { - return &[]metric.AddOption{} - }, - } - - metricRecordOptionPool = &sync.Pool{ - New: func() interface{} { - return &[]metric.RecordOption{} - }, - } -) - -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { - attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) - *recordOpts = append(*recordOpts, o) - s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) - s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) - s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o) - *recordOpts = (*recordOpts)[:0] - metricRecordOptionPool.Put(recordOpts) - } - - if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { - attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) - *addOpts = append(*addOpts, o) - s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) 
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - *addOpts = (*addOpts)[:0] - metricAddOptionPool.Put(addOpts) - } -} - -// hasOptIn returns true if the comma-separated version string contains the -// exact optIn value. -func hasOptIn(version, optIn string) bool { - for _, v := range strings.Split(version, ",") { - if strings.TrimSpace(v) == optIn { - return true - } - } - return false -} - -func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - server := HTTPServer{ - duplicate: duplicate, - } - server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) - if duplicate { - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) - } - return server -} - -type HTTPClient struct { - duplicate bool - - // old metrics - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram - - // new metrics - requestBodySize metric.Int64Histogram - requestDuration metric.Float64Histogram -} - -func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := hasOptIn(env, "http/dup") - client := HTTPClient{ - duplicate: duplicate, - } - client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) - if duplicate { - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) - } - - return client -} - -// RequestTraceAttrs returns attributes for an HTTP request made by a client. -func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) - if c.duplicate { - return OldHTTPClient{}.RequestTraceAttrs(req, attrs) - } - return attrs -} - -// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. 
-func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) - if c.duplicate { - return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) - } - return attrs -} - -func (c HTTPClient) Status(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -func (c HTTPClient) ErrorType(err error) attribute.KeyValue { - return CurrentHTTPClient{}.ErrorType(err) -} - -type MetricOpts struct { - measurement metric.MeasurementOption - addOptions metric.AddOption -} - -func (o MetricOpts) MeasurementOption() metric.MeasurementOption { - return o.measurement -} - -func (o MetricOpts) AddOptions() metric.AddOption { - return o.addOptions -} - -func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { - opts := map[string]MetricOpts{} - - attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["new"] = MetricOpts{ - measurement: set, - addOptions: set, - } - - if c.duplicate { - attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["old"] = MetricOpts{ - measurement: set, - addOptions: set, - } - } - - return opts -} - -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { - if s.requestBodySize == nil || s.requestDuration == nil { - // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). - return - } - - s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) - s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) - - if s.duplicate { - s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - } -} - -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { - if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
- return - } - - s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) -} - -func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { - attrs := CurrentHTTPClient{}.TraceAttributes(host) - if s.duplicate { - return OldHTTPClient{}.TraceAttributes(host, attrs) - } - - return attrs -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go index f2cf8a15..a8a0d58d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -5,10 +5,11 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/ // Generate semconv package: //go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/common_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=common_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/server.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/server_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=server_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/client_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=client_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconvtest_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconvtest_test.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go //go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": 
\"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go deleted file mode 100644 index 53976b0d..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ /dev/null @@ -1,573 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv provides OpenTelemetry semantic convention types and -// functionality. -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "fmt" - "net/http" - "reflect" - "slices" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" -) - -type RequestTraceAttrsOpts struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -type CurrentHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { - count := 3 // ServerAddress, Method, Scheme - - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - count++ - } - - method, methodOriginal := n.method(req.Method) - if methodOriginal != (attribute.KeyValue{}) { - count++ - } - - scheme := n.scheme(req.TLS != nil) - - peer, peerPort := SplitHostPort(req.RemoteAddr) - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - count++ - if peerPort > 0 { - count++ - } - } - - useragent := req.UserAgent() - if useragent != "" { - count++ - } - - // For client IP, use, in order: - // 1. The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. 
The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - count++ - } - - if req.URL != nil && req.URL.Path != "" { - count++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - count++ - } - if protoVersion != "" { - count++ - } - - route := httpRoute(req.Pattern) - if route != "" { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - attrs = append(attrs, - semconvNew.ServerAddress(host), - method, - scheme, - ) - - if hostPort > 0 { - attrs = append(attrs, semconvNew.ServerPort(hostPort)) - } - if methodOriginal != (attribute.KeyValue{}) { - attrs = append(attrs, methodOriginal) - } - - if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) - if peerPort > 0 { - attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, semconvNew.ClientAddress(clientIP)) - } - - if req.URL != nil && req.URL.Path != "" { - attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if route != "" { - attrs = append(attrs, n.Route(route)) - } - - return attrs -} - -func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return semconvNew.NetworkTransportTCP - case "udp", "udp4", "udp6": - return semconvNew.NetworkTransportUDP - case "unix", "unixgram", "unixpacket": - return semconvNew.NetworkTransportUnix - default: - return semconvNew.NetworkTransportPipe - } -} - -func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP -// response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will -// be omitted. 
-func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - var count int - - if resp.ReadBytes > 0 { - count++ - } - if resp.WriteBytes > 0 { - count++ - } - if resp.StatusCode > 0 { - count++ - } - - attributes := make([]attribute.KeyValue, 0, count) - - if resp.ReadBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), - ) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), - ) - } - if resp.StatusCode > 0 { - attributes = append(attributes, - semconvNew.HTTPResponseStatusCode(resp.StatusCode), - ) - } - - return attributes -} - -// Route returns the attribute for the route. -func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { - return semconvNew.HTTPRoute(route) -} - -func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} - } - - var err error - requestBodySizeHistogram, err := meter.Int64Histogram( - semconvNew.HTTPServerRequestBodySizeName, - metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), - metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), - ) - handleErr(err) - - responseBodySizeHistogram, err := meter.Int64Histogram( - semconvNew.HTTPServerResponseBodySizeName, - metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), - metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), - ) - handleErr(err) - requestDurationHistogram, err := meter.Float64Histogram( - semconvNew.HTTPServerRequestDurationName, - metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), - metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), - metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), - ) - handleErr(err) - - return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram -} - -func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - num++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - n.scheme(req.TLS != nil), - semconvNew.ServerAddress(host)) - - if hostPort > 0 { - attributes = append(attributes, semconvNew.ServerPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -type CurrentHTTPClient struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. -func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - /* - below attributes are returned: - - http.request.method - - http.request.method.original - - url.full - - server.address - - server.port - - network.protocol.name - - network.protocol.version - */ - numOfAttributes := 3 // URL, server address, proto, and method. - - var urlHost string - if req.URL != nil { - urlHost = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if eligiblePort > 0 { - numOfAttributes++ - } - useragent := req.UserAgent() - if useragent != "" { - numOfAttributes++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - numOfAttributes++ - } - if protoVersion != "" { - numOfAttributes++ - } - - method, originalMethod := n.method(req.Method) - if originalMethod != (attribute.KeyValue{}) { - numOfAttributes++ - } - - attrs := make([]attribute.KeyValue, 0, numOfAttributes) - - attrs = append(attrs, method) - if originalMethod != (attribute.KeyValue{}) { - attrs = append(attrs, originalMethod) - } - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, semconvNew.URLFull(u)) - - attrs = append(attrs, semconvNew.ServerAddress(requestHost)) - if eligiblePort > 0 { - attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - return attrs -} - -// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. 
-func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - /* - below attributes are returned: - - http.response.status_code - - error.type - */ - var count int - if resp.StatusCode > 0 { - count++ - } - - if isErrorStatusCode(resp.StatusCode) { - count++ - } - - attrs := make([]attribute.KeyValue, 0, count) - if resp.StatusCode > 0 { - attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) - } - - if isErrorStatusCode(resp.StatusCode) { - errorType := strconv.Itoa(resp.StatusCode) - attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) - } - return attrs -} - -func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - - if value == "" { - return semconvNew.ErrorTypeOther - } - - return semconvNew.ErrorTypeKey.String(value) -} - -func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { - if method == "" { - return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} - } - if attr, ok := methodLookup[method]; ok { - return attr, attribute.KeyValue{} - } - - orig := semconvNew.HTTPRequestMethodOriginal(method) - if attr, ok := methodLookup[strings.ToUpper(method)]; ok { - return attr, orig - } - return semconvNew.HTTPRequestMethodGet, orig -} - -func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Histogram{}, noop.Float64Histogram{} - } - - var err error - requestBodySize, err := meter.Int64Histogram( - semconvNew.HTTPClientRequestBodySizeName, - metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), - metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), - ) - handleErr(err) - - requestDuration, err := meter.Float64Histogram( - semconvNew.HTTPClientRequestDurationName, - metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), - metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), - metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), - ) - handleErr(err) - - return requestBodySize, requestDuration -} - -func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - num := len(additionalAttributes) + 2 - var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - num++ - } - - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - num++ - } - if protoVersion != "" { - num++ - } - - if statusCode > 0 { - num++ - } - - attributes := slices.Grow(additionalAttributes, num) - attributes = append(attributes, - semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), - semconvNew.ServerAddress(requestHost), - n.scheme(req), - ) - - if port > 0 { - attributes = append(attributes, semconvNew.ServerPort(port)) - } - if protoName != "" { - attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) - } - if protoVersion != "" { - 
attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) - } - return attributes -} - -// TraceAttributes returns attributes for httptrace. -func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return []attribute.KeyValue{ - semconvNew.ServerAddress(host), - } -} - -func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue { - if req.URL != nil && req.URL.Scheme != "" { - return semconvNew.URLScheme(req.URL.Scheme) - } - if req.TLS != nil { - return semconvNew.URLScheme("https") - } - return semconvNew.URLScheme("http") -} - -func isErrorStatusCode(code int) bool { - return code >= 400 || code < 100 -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go new file mode 100644 index 00000000..5ae6a073 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/server.go @@ -0,0 +1,403 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/server.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "slices" + "strings" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/httpconv" +) + +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct{ + requestBodySizeHistogram httpconv.ServerRequestBodySize + responseBodySizeHistogram httpconv.ServerResponseBodySize + requestDurationHistogram httpconv.ServerRequestDuration +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + server := HTTPServer{} + + var err error + server.requestBodySizeHistogram, err = httpconv.NewServerRequestBodySize(meter) + handleErr(err) + + server.responseBodySizeHistogram, err = httpconv.NewServerResponseBodySize(meter) + handleErr(err) + + server.requestDurationHistogram, err = httpconv.NewServerRequestDuration( + meter, + metric.WithExplicitBucketBoundaries( + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, + 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10, + ), + ) + handleErr(err) + return server +} + +// Status returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (n HTTPServer) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. 
For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconv.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconv.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. 
+ attrs = append(attrs, semconv.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconv.NetworkPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, semconv.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconv.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconv.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconv.NetworkProtocolVersion(protoVersion)) + } + + if route != "" { + attrs = append(attrs, n.Route(route)) + } + + return attrs +} + +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + attr := semconv.NetworkTransportPipe + switch network { + case "tcp", "tcp4", "tcp6": + attr = semconv.NetworkTransportTCP + case "udp", "udp4", "udp6": + attr = semconv.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + attr = semconv.NetworkTransportUnix + } + + return []attribute.KeyValue{attr} +} + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + Route string + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + + // The request duration, in milliseconds + ElapsedTime float64 +} + +var ( + metricAddOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.AddOption{} + }, + } + + metricRecordOptionPool = &sync.Pool{ + New: func() any { + return &[]metric.RecordOption{} + }, + } +) + +func (n HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + attributes := n.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.Route, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + n.requestBodySizeHistogram.Inst().Record(ctx, md.RequestSize, *recordOpts...) + n.responseBodySizeHistogram.Inst().Record(ctx, md.ResponseSize, *recordOpts...) + n.requestDurationHistogram.Inst().Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) +} + +func (n HTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconv.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconv.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconv.HTTPRequestMethodGet, orig +} + +func (n HTTPServer) scheme(https bool) attribute.KeyValue { //nolint:revive // ignore linter + if https { + return semconv.URLScheme("https") + } + return semconv.URLScheme("http") +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. 
+func (n HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconv.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconv.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconv.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n HTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +func (n HTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, route string, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + if route != "" { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconv.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconv.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPResponseStatusCode(statusCode)) + } + + if route != "" { + attributes = append(attributes, semconv.HTTPRoute(route)) + } + return attributes +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index bc1f7751..96422ad1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.37.0" ) // SplitHostPort splits a network address hostport of the form "host", @@ -53,10 +53,10 @@ func SplitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) // nolint: gosec // Byte size checked 16 above. + return host, int(p) //nolint:gosec // Byte size checked 16 above. 
} -func requiredHTTPPort(https bool, port int) int { // nolint:revive +func requiredHTTPPort(https bool, port int) int { //nolint:revive // ignore linter if https { if port > 0 && port != 443 { return port diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go deleted file mode 100644 index ba7fccf1..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconv/v120.0.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - -import ( - "errors" - "io" - "net/http" - "slices" - - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type OldHTTPServer struct{} - -// RequestTraceAttrs returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) -} - -func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { - return semconvutil.NetTransport(network) -} - -// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. -// -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { - if resp.ReadBytes > 0 { - attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) - } - if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) - } - if resp.WriteBytes > 0 { - attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) - } - if resp.StatusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) - } - if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { - // This is not in the semantic conventions, but is historically provided - attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) - } - - return attributes -} - -// Route returns the attribute for the route. -func (o OldHTTPServer) Route(route string) attribute.KeyValue { - return semconv.HTTPRoute(route) -} - -// HTTPStatusCode returns the attribute for the HTTP status code. -// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. -func HTTPStatusCode(status int) attribute.KeyValue { - return semconv.HTTPStatusCode(status) -} - -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - -func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - var err error - requestBytesCounter, err := meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - serverLatencyMeasure, err := meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, serverLatencyMeasure -} - -func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - n := len(additionalAttributes) + 3 - var host string - var p int - if server == "" { - host, p = SplitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = SplitHostPort(server) - if p < 0 { - _, p = SplitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - o.scheme(req.TLS != nil), - semconv.NetHostName(host)) - - if hostPort > 0 { - attributes = append(attributes, semconv.NetHostPort(hostPort)) - } - if protoName != "" { - attributes = append(attributes, semconv.NetProtocolName(protoName)) - } - if protoVersion != "" { - attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return semconv.HTTPSchemeHTTPS - } - return semconv.HTTPSchemeHTTP -} - -type OldHTTPClient struct{} - -func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req, attrs) -} - -func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp, attrs) -} - -func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.status_code int - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. - var h string - if req.URL != nil { - h = req.URL.Host - } - var requestHost string - var requestPort int - for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = SplitHostPort(hostport) - if requestHost != "" || requestPort > 0 { - break - } - } - - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) - if port > 0 { - n++ - } - - if statusCode > 0 { - n++ - } - - attributes := slices.Grow(additionalAttributes, n) - attributes = append(attributes, - semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), - semconv.NetPeerName(requestHost), - ) - - if port > 0 { - attributes = append(attributes, semconv.NetPeerPort(port)) - } - - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - return attributes -} - -// Client HTTP metrics. 
-const ( - clientRequestSize = "http.client.request.size" // Incoming request bytes total - clientResponseSize = "http.client.response.size" // Incoming response bytes total - clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds -) - -func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { - if meter == nil { - return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} - } - requestBytesCounter, err := meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - responseBytesCounter, err := meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - latencyMeasure, err := meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) - - return requestBytesCounter, responseBytesCounter, latencyMeasure -} - -// TraceAttributes returns attributes for httptrace. -func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { - return append(attrs, semconv.NetHostName(host)) -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go deleted file mode 100644 index 7aa5f99e..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -// Generate semconvutil package: -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go -//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go deleted file mode 100644 index b9973547..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ /dev/null @@ -1,594 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/semconvutil/httpconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconvutil provides OpenTelemetry semantic convention utilities. 
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "fmt" - "net/http" - "slices" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -type HTTPServerRequestOptions struct { - // If set, this is used as value for the "http.client_ip" attribute. - HTTPClientIP string -} - -// HTTPClientResponse returns trace attributes for an HTTP response received by a -// client from a server. It will return the following attributes if the related -// values are defined in resp: "http.status.code", -// "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. For example: -// -// HTTPClientResponse(resp, ClientRequest(resp.Request))) -func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientResponse(resp, attrs) -} - -// HTTPClientRequest returns trace attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length". -func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ClientRequest(req, attrs) -} - -// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. -// The following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the -// related values are defined in req: "net.peer.port". -func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue { - return hc.ClientRequestMetrics(req) -} - -// HTTPClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func HTTPClientStatus(code int) (codes.Code, string) { - return hc.ClientStatus(code) -} - -// HTTPServerRequest returns trace attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". The following attributes are returned if -// they related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip". 
-func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - return hc.ServerRequest(server, req, opts, attrs) -} - -// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a -// server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequestMetrics(server, req) -} - -// HTTPServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func HTTPServerStatus(code int) (codes.Code, string) { - return hc.ServerStatus(code) -} - -// httpConv are the HTTP semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type httpConv struct { - NetConv *netConv - - HTTPClientIPKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPResponseContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - UserAgentOriginalKey attribute.Key -} - -var hc = &httpConv{ - NetConv: nc, - - HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPMethodKey: semconv.HTTPMethodKey, - HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, - HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, - HTTPRouteKey: semconv.HTTPRouteKey, - HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, - HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, - HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, - HTTPTargetKey: semconv.HTTPTargetKey, - HTTPURLKey: semconv.HTTPURLKey, - UserAgentOriginalKey: semconv.UserAgentOriginalKey, -} - -// ClientResponse returns attributes for an HTTP response received by a client -// from a server. The following attributes are returned if the related values -// are defined in resp: "http.status.code", "http.response_content_length". -// -// This does not add all OpenTelemetry required attributes for an HTTP event, -// it assumes ClientRequest was used to create the span with a complete set of -// attributes. If a complete set of attributes can be generated using the -// request contained in resp. 
For example: -// -// ClientResponse(resp, ClientRequest(resp.Request)) -func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.status_code int - http.response_content_length int - */ - var n int - if resp.StatusCode > 0 { - n++ - } - if resp.ContentLength > 0 { - n++ - } - if n == 0 { - return attrs - } - - attrs = slices.Grow(attrs, n) - if resp.StatusCode > 0 { - attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) - } - if resp.ContentLength > 0 { - attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) - } - return attrs -} - -// ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.method", -// "net.peer.name". The following attributes are returned if the related values -// are defined in req: "net.peer.port", "user_agent.original", -// "http.request_content_length", "user_agent.original". -func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - user_agent.original string - http.url string - net.peer.name string - net.peer.port int - http.request_content_length int - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. See ClientResponse. - http.response_content_length This requires the response. See ClientResponse. - net.sock.family This requires the socket used. - net.sock.peer.addr This requires the socket used. - net.sock.peer.name This requires the socket used. - net.sock.peer.port This requires the socket used. - http.resend_count This is something outside of a single request. - net.protocol.name The value is the Request is ignored, and the go client will always use "http". - net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. - */ - n := 3 // URL, peer name, proto, and method. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - if req.ContentLength > 0 { - n++ - } - - attrs = slices.Grow(attrs, n) - attrs = append(attrs, c.method(req.Method)) - - var u string - if req.URL != nil { - // Remove any username/password info that may be in the URL. - userinfo := req.URL.User - req.URL.User = nil - u = req.URL.String() - // Restore any username/password info that was removed. - req.URL.User = userinfo - } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if l := req.ContentLength; l > 0 { - attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) - } - - return attrs -} - -// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.method", "net.peer.name". -// The following attributes are returned if the related values -// are defined in req: "net.peer.port". 
-func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - net.peer.name string - net.peer.port int - */ - - n := 2 // method, peer name. - var h string - if req.URL != nil { - h = req.URL.Host - } - peer, p := firstHostPort(h, req.Header.Get("Host")) - port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) - if port > 0 { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer)) - - if port > 0 { - attrs = append(attrs, c.NetConv.PeerPort(port)) - } - - return attrs -} - -// ServerRequest returns attributes for an HTTP request received by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "http.target", "net.host.name". The following attributes are returned if they -// related values are defined in req: "net.host.port", "net.sock.peer.addr", -// "net.sock.peer.port", "user_agent.original", "http.client_ip", -// "net.protocol.name", "net.protocol.version". -func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.method string - http.scheme string - net.host.name string - net.host.port int - net.sock.peer.addr string - net.sock.peer.port int - user_agent.original string - http.client_ip string - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - http.target string Note: doesn't include the query parameter. - */ - - /* The following semantic conventions are not returned: - http.status_code This requires the response. - http.request_content_length This requires the len() of body, which can mutate it. - http.response_content_length This requires the response. - http.route This is not available. - net.sock.peer.name This would require a DNS lookup. - net.sock.host.addr The request doesn't have access to the underlying socket. - net.sock.host.port The request doesn't have access to the underlying socket. - - */ - n := 4 // Method, scheme, proto, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. - host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - peer, peerPort := splitHostPort(req.RemoteAddr) - if peer != "" { - n++ - if peerPort > 0 { - n++ - } - } - useragent := req.UserAgent() - if useragent != "" { - n++ - } - - // For client IP, use, in order: - // 1. 
The value passed in the options - // 2. The value in the X-Forwarded-For header - // 3. The peer address - clientIP := opts.HTTPClientIP - if clientIP == "" { - clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) - if clientIP == "" { - clientIP = peer - } - } - if clientIP != "" { - n++ - } - - var target string - if req.URL != nil { - target = req.URL.Path - if target != "" { - n++ - } - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" && protoName != "http" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs = slices.Grow(attrs, n) - - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - - if peer != "" { - // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a - // file-path that would be interpreted with a sock family. - attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) - if peerPort > 0 { - attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) - } - } - - if useragent != "" { - attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) - } - - if clientIP != "" { - attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) - } - - if target != "" { - attrs = append(attrs, c.HTTPTargetKey.String(target)) - } - - if protoName != "" && protoName != "http" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -// ServerRequestMetrics returns metric attributes for an HTTP request received -// by a server. -// -// The server must be the primary server name if it is known. For example this -// would be the ServerName directive -// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache -// server, and the server_name directive -// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an -// nginx server. More generically, the primary server name would be the host -// header value that matches the default virtual host of an HTTP server. It -// should include the host identifier and if a port is used to route to the -// server that port identifier should be included as an appropriate port -// suffix. -// -// If the primary server name is not known, server should be an empty string. -// The req Host will be used to determine the server instead. -// -// The following attributes are always returned: "http.method", "http.scheme", -// "net.host.name". The following attributes are returned if they related -// values are defined in req: "net.host.port". -func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - /* The following semantic conventions are returned if present: - http.scheme string - http.route string - http.method string - http.status_code int - net.host.name string - net.host.port int - net.protocol.name string Note: not set if the value is "http". - net.protocol.version string - */ - - n := 3 // Method, scheme, and host name. - var host string - var p int - if server == "" { - host, p = splitHostPort(req.Host) - } else { - // Prioritize the primary server name. 
- host, p = splitHostPort(server) - if p < 0 { - _, p = splitHostPort(req.Host) - } - } - hostPort := requiredHTTPPort(req.TLS != nil, p) - if hostPort > 0 { - n++ - } - protoName, protoVersion := netProtocol(req.Proto) - if protoName != "" { - n++ - } - if protoVersion != "" { - n++ - } - - attrs := make([]attribute.KeyValue, 0, n) - - attrs = append(attrs, c.methodMetric(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.NetConv.HostName(host)) - - if hostPort > 0 { - attrs = append(attrs, c.NetConv.HostPort(hostPort)) - } - if protoName != "" { - attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) - } - if protoVersion != "" { - attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) - } - - return attrs -} - -func (c *httpConv) method(method string) attribute.KeyValue { - if method == "" { - return c.HTTPMethodKey.String(http.MethodGet) - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return c.HTTPMethodKey.String(method) -} - -func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { - return c.HTTPSchemeHTTPS - } - return c.HTTPSchemeHTTP -} - -func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { - xForwardedFor = xForwardedFor[:idx] - } - return xForwardedFor -} - -func requiredHTTPPort(https bool, port int) int { // nolint:revive - if https { - if port > 0 && port != 443 { - return port - } - } else { - if port > 0 && port != 80 { - return port - } - } - return -1 -} - -// Return the request host and port from the first non-empty source. -func firstHostPort(source ...string) (host string, port int) { - for _, hostport := range source { - host, port = splitHostPort(hostport) - if host != "" || port > 0 { - break - } - } - return -} - -// ClientStatus returns a span status code and message for an HTTP status code -// value received by a client. -func (c *httpConv) ClientStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 400 { - return codes.Error, "" - } - return codes.Unset, "" -} - -// ServerStatus returns a span status code and message for an HTTP status code -// value returned by a server. Status codes in the 400-499 range are not -// returned as errors. -func (c *httpConv) ServerStatus(code int) (codes.Code, string) { - if code < 100 || code >= 600 { - return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) - } - if code >= 500 { - return codes.Error, "" - } - return codes.Unset, "" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go deleted file mode 100644 index df97255e..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by gotmpl. DO NOT MODIFY. 
-// source: internal/shared/semconvutil/netconv.go.tmpl - -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" - -import ( - "net" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" -) - -// NetTransport returns a trace attribute describing the transport protocol of the -// passed network. See the net.Dial for information about acceptable network -// values. -func NetTransport(network string) attribute.KeyValue { - return nc.Transport(network) -} - -// netConv are the network semantic convention attributes defined for a version -// of the OpenTelemetry specification. -type netConv struct { - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetProtocolName attribute.Key - NetProtocolVersion attribute.Key - NetSockFamilyKey attribute.Key - NetSockPeerAddrKey attribute.Key - NetSockPeerPortKey attribute.Key - NetSockHostAddrKey attribute.Key - NetSockHostPortKey attribute.Key - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportInProc attribute.KeyValue -} - -var nc = &netConv{ - NetHostNameKey: semconv.NetHostNameKey, - NetHostPortKey: semconv.NetHostPortKey, - NetPeerNameKey: semconv.NetPeerNameKey, - NetPeerPortKey: semconv.NetPeerPortKey, - NetProtocolName: semconv.NetProtocolNameKey, - NetProtocolVersion: semconv.NetProtocolVersionKey, - NetSockFamilyKey: semconv.NetSockFamilyKey, - NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, - NetSockPeerPortKey: semconv.NetSockPeerPortKey, - NetSockHostAddrKey: semconv.NetSockHostAddrKey, - NetSockHostPortKey: semconv.NetSockHostPortKey, - NetTransportOther: semconv.NetTransportOther, - NetTransportTCP: semconv.NetTransportTCP, - NetTransportUDP: semconv.NetTransportUDP, - NetTransportInProc: semconv.NetTransportInProc, -} - -func (c *netConv) Transport(network string) attribute.KeyValue { - switch network { - case "tcp", "tcp4", "tcp6": - return c.NetTransportTCP - case "udp", "udp4", "udp6": - return c.NetTransportUDP - case "unix", "unixgram", "unixpacket": - return c.NetTransportInProc - default: - // "ip:*", "ip4:*", and "ip6:*" all are considered other. - return c.NetTransportOther - } -} - -// Host returns attributes for a network host address. -func (c *netConv) Host(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.HostName(h)) - if p > 0 { - attrs = append(attrs, c.HostPort(p)) - } - return attrs -} - -func (c *netConv) HostName(name string) attribute.KeyValue { - return c.NetHostNameKey.String(name) -} - -func (c *netConv) HostPort(port int) attribute.KeyValue { - return c.NetHostPortKey.Int(port) -} - -func family(network, address string) string { - switch network { - case "unix", "unixgram", "unixpacket": - return "unix" - default: - if ip := net.ParseIP(address); ip != nil { - if ip.To4() == nil { - return "inet6" - } - return "inet" - } - } - return "" -} - -// Peer returns attributes for a network peer address. 
-func (c *netConv) Peer(address string) []attribute.KeyValue { - h, p := splitHostPort(address) - var n int - if h != "" { - n++ - if p > 0 { - n++ - } - } - - if n == 0 { - return nil - } - - attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.PeerName(h)) - if p > 0 { - attrs = append(attrs, c.PeerPort(p)) - } - return attrs -} - -func (c *netConv) PeerName(name string) attribute.KeyValue { - return c.NetPeerNameKey.String(name) -} - -func (c *netConv) PeerPort(port int) attribute.KeyValue { - return c.NetPeerPortKey.Int(port) -} - -func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { - return c.NetSockPeerAddrKey.String(addr) -} - -func (c *netConv) SockPeerPort(port int) attribute.KeyValue { - return c.NetSockPeerPortKey.Int(port) -} - -// splitHostPort splits a network address hostport of the form "host", -// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", -// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and -// port. -// -// An empty host is returned if it is not provided or unparsable. A negative -// port is returned if it is not provided or unparsable. -func splitHostPort(hostport string) (host string, port int) { - port = -1 - - if strings.HasPrefix(hostport, "[") { - addrEnd := strings.LastIndex(hostport, "]") - if addrEnd < 0 { - // Invalid hostport. - return - } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { - host = hostport[1:addrEnd] - return - } - } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { - host = hostport - return - } - } - - host, pStr, err := net.SplitHostPort(hostport) - if err != nil { - return - } - - p, err := strconv.ParseUint(pStr, 10, 16) - if err != nil { - return - } - return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. -} - -func netProtocol(proto string) (name string, version string) { - name, version, _ = strings.Cut(proto, "/") - switch name { - case "HTTP": - name = "http" - case "QUIC": - name = "quic" - case "SPDY": - name = "spdy" - default: - name = strings.ToLower(name) - } - return name, version -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 44b86ad8..514ae675 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,14 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" ) // Transport implements the http.RoundTripper interface and wraps @@ -129,6 +129,37 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) + + // Defer metrics recording function to record the metrics on error or no error. 
+ defer func() { + metricAttributes := semconv.MetricAttributes{ + Req: r, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + } + + if err == nil { + metricAttributes.StatusCode = res.StatusCode + } + + metricOpts := t.semconv.MetricOptions(metricAttributes) + + metricData := semconv.MetricData{ + RequestSize: bw.BytesRead(), + } + + if err == nil { + readRecordFunc := func(int64) {} + res.Body = newWrappedBody(span, readRecordFunc, res.Body) + } + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + metricData.ElapsedTime = elapsedTime + + t.semconv.RecordMetrics(ctx, metricData, metricOpts) + }() + if err != nil { // set error type attribute if the error is part of the predefined // error types. @@ -141,35 +172,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { span.SetStatus(codes.Error, err.Error()) span.End() - return res, err - } - - // metrics - metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ - Req: r, - StatusCode: res.StatusCode, - AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), - }) - // For handling response bytes we leverage a callback when the client reads the http response - readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts) + return res, err } // traces span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) span.SetStatus(t.semconv.Status(res.StatusCode)) - res.Body = newWrappedBody(span, readRecordFunc, res.Body) - - // Use floating point division here for higher precision (instead of Millisecond method). - elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - - t.semconv.RecordMetrics(ctx, semconv.MetricData{ - RequestSize: bw.BytesRead(), - ElapsedTime: elapsedTime, - }, metricOpts) - return res, nil } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 6be4c1fd..6e096da5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,6 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.61.0" + return "0.64.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 2b53a25e..a6d0cbcc 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -8,3 +8,4 @@ nam valu thirdparty addOpt +observ diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index b01762ff..1b1b2aff 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -197,6 +197,9 @@ linters: - float-compare - go-require - require-error + usetesting: + context-background: true + context-todo: true exclusions: generated: lax presets: diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore index 53285058..994b677d 100644 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -1,4 +1,5 @@ http://localhost +https://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects @@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual -http://4.3.2.1:78/user/123 \ No newline at end of file +http://4.3.2.1:78/user/123 +file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317 +# URL works, but it has blocked link checkers. +https://dl.acm.org/doi/10.1145/198429.198435 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index f3abcfdc..ecbe0582 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05 + +### Added + +- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175) +- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages. + This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287) +- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. + Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353) +- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434) +- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. 
(#7459) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486) +- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374) +- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512) +- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524) +- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571) +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608) +- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`. + All `Processor` implementations now include an `Enabled` method. (#7639) +- The `go.opentelemetry.io/otel/semconv/v1.38.0` package. + The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648) + +### Changed + +- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set. + Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266) +- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266) +- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302) +- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306) +- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`. + ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded. + Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default. + To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363) +- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371) +- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421) +- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. 
(#7427) +- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438) +- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types. + If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442) + +### Fixed + +- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them. + Attributes with duplicate keys will use the last value passed. (#7300) +- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) +- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655) +- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656) + +### Removed + +- Drop support for [Go 1.23]. (#7274) +- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`. + The `Enabled` method has been added to the `Processor` interface instead. + All `Processor` implementations must now implement the `Enabled` method. + Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639) + ## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 This release is the last to support [Go 1.23]. @@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD +[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0 [1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 +[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 0b3ae855..ff5e1f76 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel (This may print some warning about "build constraints exclude all Go files", just ignore it.) -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. +Alternatively, you can use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go @@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) -This would put the project in the `opentelemetry-go` directory in -current working directory. +This will add the project as `opentelemetry-go` within the current directory. Enter the newly created directory and add your fork as a new remote: @@ -109,7 +108,7 @@ A PR is considered **ready to merge** when: This is not enforced through automation, but needs to be validated by the maintainer merging. - * At least one of the qualified approvals need to be from an + * At least one of the qualified approvals needs to be from an [Approver]/[Maintainer] affiliated with a different company than the author of the PR. * PRs introducing changes that have already been discussed and consensus @@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those uses cases are +use cases are clear, but the methods to satisfy those use cases are not. As such, Contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure is +conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the @@ -217,7 +216,7 @@ about dependency compatibility. This project does not partition dependencies based on the environment (i.e. `development`, `staging`, `production`). -Only the dependencies explicitly included in the released modules have be +Only the dependencies explicitly included in the released modules have been tested and verified to work with the released code. No other guarantee is made about the compatibility of other dependencies. @@ -635,8 +634,8 @@ is not in their root name. The use of internal packages should be scoped to a single module. 
A sub-module should never import from a parent internal package. This creates a coupling -between the two modules where a user can upgrade the parent without the child -and if the internal package API has changed it will fail to upgrade[^3]. +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, it will fail to upgrade[^3]. There are two known exceptions to this rule: @@ -657,7 +656,7 @@ this. ### Ignoring context cancellation -OpenTelemetry API implementations need to ignore the cancellation of the context that are +OpenTelemetry API implementations need to ignore the cancellation of the context that is passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). Recording methods should not return an error describing the cancellation state of the context when they complete, nor should they abort any work. @@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat should be honored. This means all work done on behalf of the user provided context should be canceled. +### Observability + +OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself. +This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications. + +This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components. + +#### Environment Variable Activation + +Observability features are currently experimental. +They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable. +This follows the established experimental feature pattern used throughout the SDK. + +Components should check for this environment variable using a consistent pattern: + +```go +import "go.opentelemetry.io/otel/*/internal/x" + +if x.Observability.Enabled() { + // Initialize observability metrics +} +``` + +**References**: + +- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go) +- [sdk](./sdk/internal/x/x.go) + +#### Encapsulation + +Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`). +It should not be mixed into the instrumented component. + +Prefer this: + +```go +type SDKComponent struct { + inst *instrumentation +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +To this: + +```go +// ❌ Avoid this pattern. +type SDKComponent struct { + /* other SDKComponent fields... */ + + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} +``` + +The instrumentation code should not bloat the code being instrumented. +Likely, this means its own file, or its own package if it is complex or reused. + +#### Initialization + +Instrumentation setup should be explicit, side-effect free, and local to the relevant component. +Avoid relying on global or implicit [side effects][side-effect] for initialization.
+ +Encapsulate setup in constructor functions, ensuring clear ownership and scope: + +```go +import ( + "errors" + + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +type SDKComponent struct { + inst *instrumentation +} + +func NewSDKComponent(config Config) (*SDKComponent, error) { + inst, err := newInstrumentation() + if err != nil { + return nil, err + } + return &SDKComponent{inst: inst}, nil +} + +type instrumentation struct { + inflight otelconv.SDKComponentInflight + exported otelconv.SDKComponentExported +} + +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + "", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + inst := &instrumentation{} + + var err, e error + inst.inflight, e = otelconv.NewSDKComponentInflight(meter) + err = errors.Join(err, e) + + inst.exported, e = otelconv.NewSDKComponentExported(meter) + err = errors.Join(err, e) + + return inst, err +} +``` + +```go +// ❌ Avoid this pattern. +func (c *Component) initObservability() { + // Initialize observability metrics + if !x.Observability.Enabled() { + return + } + + // Initialize observability metrics + c.inst = &instrumentation{/* ... */} +} +``` + +[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science) + +#### Performance + +When observability is disabled there should be little to no overhead. + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if e.inst != nil { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + } + // Export spans... +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + attrs := expensiveOperation() + e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...) + // Export spans... +} + +func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) { + if i == nil || i.inflight == nil { + return + } + i.inflight.Add(ctx, count, metric.WithAttributes(attrs...)) +} +``` + +When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead. + +##### Attribute and Option Allocation Management + +Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes. + +```go +var ( + attrPool = sync.Pool{ + New: func() any { + // Pre-allocate common capacity + knownCap := 8 // Adjust based on expected usage + s := make([]attribute.KeyValue, 0, knownCap) + // Return a pointer to avoid extra allocation on Put(). + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + // Return a pointer to avoid extra allocation on Put(). + return &o + }, + } +) + +func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) { + attrs := attrPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // Reset. + attrPool.Put(attrs) + }() + + *attrs = append(*attrs, baseAttrs...) + // Add any dynamic attributes. 
+ *attrs = append(*attrs, semconv.OTelComponentName("exporter-1")) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + set := attribute.NewSet(*attrs...) + *addOpt = append(*addOpt, metric.WithAttributeSet(set)) + + i.counter.Add(ctx, value, *addOpt...) +} +``` + +Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used. +This amortizes the cost of allocation and synchronization. +Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness. + +[`sync.Pool`]: https://pkg.go.dev/sync#Pool + +##### Cache common attribute sets for repeated measurements + +If a static set of attributes are used for measurements and they are known at compile time, pre-compute and cache these attributes. + +```go +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + return spanLiveSetCache[key] +} +``` + +##### Benchmarking + +Always provide benchmarks when introducing or refactoring instrumentation. +Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios: + +```go +func BenchmarkExportSpans(b *testing.B) { + scenarios := []struct { + name string + obsEnabled bool + }{ + {"ObsDisabled", false}, + {"ObsEnabled", true}, + } + + for _, scenario := range scenarios { + b.Run(scenario.name, func(b *testing.B) { + b.Setenv( + "OTEL_GO_X_OBSERVABILITY", + strconv.FormatBool(scenario.obsEnabled), + ) + + exporter := NewExporter() + spans := generateTestSpans(100) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _ = exporter.ExportSpans(context.Background(), spans) + } + }) + } +} +``` + +#### Error Handling and Robustness + +Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible. + +```go +func newInstrumentation() (*instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + // Use the partially initialized counter if available. + i := &instrumentation{counter: counter} + // Return any error to the caller. + return i, err +} +``` + +```go +// ❌ Avoid this pattern. +func newInstrumentation() *instrumentation { + if !x.Observability.Enabled() { + return nil, nil + } + + m := otel.GetMeterProvider().Meter(/* initialize meter */) + counter, err := otelconv.NewSDKComponentCounter(m) + if err != nil { + // ❌ Do not dump the error to the OTel Handler. Return it to the + // caller. + otel.Handle(err) + // ❌ Do not return nil if we can still use the partially initialized + // counter. + return nil + } + return &instrumentation{counter: counter} +} +``` + +If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`. 
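+
+As a rough sketch (reusing `newInstrumentation` from the examples above; the `Component` constructor shape is illustrative, not an existing SDK API), that fallback can look like:
+
+```go
+// NewComponent cannot change its signature to return an error, so any
+// instrumentation setup failure is reported to the global error handler.
+func NewComponent() *Component {
+	inst, err := newInstrumentation()
+	if err != nil {
+		// No error return path is available to the caller here.
+		otel.Handle(err)
+	}
+	// Continue with whatever instruments were created successfully.
+	return &Component{inst: inst}
+}
+```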
+ +#### Context Propagation + +Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context: + +```go +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // Use the provided context for observability measurements + e.inst.recordSpanExportStarted(ctx, len(spans)) + + err := e.doExport(ctx, spans) + + if err != nil { + e.inst.recordSpanExportFailed(ctx, len(spans), err) + } else { + e.inst.recordSpanExportSucceeded(ctx, len(spans)) + } + + return err +} +``` + +```go +// ❌ Avoid this pattern. +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + // ❌ Do not break the context propagation. + e.inst.recordSpanExportStarted(context.Background(), len(spans)) + + err := e.doExport(ctx, spans) + + /* ... */ + + return err +} +``` + +#### Semantic Conventions Compliance + +All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md). + +Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go). + +##### Component Identification + +Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes). + +If a component is not a well-known type specified in the semantic conventions, use the package path scope type as a stable identifier. + +```go +componentType := "go.opentelemetry.io/otel/sdk/trace.Span" +``` + +```go +// ❌ Do not do this. +componentType := "trace-span" +``` + +The component name should be a stable unique identifier for the specific instance of the component. + +Use a global counter to ensure uniqueness if necessary. + +```go +// Unique 0-based ID counter for component instances. +var componentIDCounter atomic.Int64 + +// nextID returns the next unique ID for a component. +func nextID() int64 { + return componentIDCounter.Add(1) - 1 +} + +// componentName returns a unique name for the component instance. +func componentName() attribute.KeyValue { + id := nextID() + name := fmt.Sprintf("%s/%d", componentType, id) + return semconv.OTelComponentName(name) +} +``` + +The component ID will need to be resettable for deterministic testing. +If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter. +See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference. + +#### Testing + +Use deterministic testing with isolated state: + +```go +func TestObservability(t *testing.T) { + // Restore state after test to ensure this does not affect other tests. + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Isolate the meter provider for deterministic testing + reader := metric.NewManualReader() + meterProvider := metric.NewMeterProvider(metric.WithReader(reader)) + otel.SetMeterProvider(meterProvider) + + // Use t.Setenv to ensure environment variable is restored after test. + t.Setenv("OTEL_GO_X_OBSERVABILITY", "true") + + // Reset component ID counter to ensure deterministic component names. + componentIDCounter.Store(0) + + /* ... test code ... */ +} +``` + +Test order should not affect results. +Ensure that any global state (e.g. 
component ID counters) is reset between tests. + ## Approvers and Maintainers ### Maintainers @@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt ### Triagers - [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). @@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http - [Aaron Clawson](https://github.com/MadVikingGod) - [Anthony Mirabella](https://github.com/Aneurysm9) +- [Cheng-Zhen Yang](https://github.com/scorpionknifes) - [Chester Cheung](https://github.com/hanyuancheung) - [Evan Torrie](https://github.com/evantorrie) - [Gustavo Silva Paiva](https://github.com/paivagustavo) diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index bc0f1f92..44870248 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -146,11 +146,12 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short +test-fuzz: ARGS=-fuzztime=10s -fuzz test-verbose: ARGS=-v -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: TIMEOUT=120 diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 6b7ab5f2..c6335954 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -55,25 +55,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | -| Ubuntu | 1.23 | amd64 | | Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | -| Ubuntu | 1.23 | 386 | | Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | -| Ubuntu | 1.23 | arm64 | -| macOS 13 | 1.25 | amd64 | -| macOS 13 | 1.24 | amd64 | -| macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | amd64 | +| macOS | 1.24 | amd64 | | macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | -| macOS | 1.23 | arm64 | | Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | -| Windows | 1.23 | amd64 | | Windows | 1.25 | 386 | | Windows | 1.24 | 386 | -| Windows | 1.23 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1ddcdef0..861756fd 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit ## Breaking changes validation -You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. +You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API. 
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). @@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` 3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. + - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand. To verify this, you can look directly at the commits since the ``. ``` @@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct. ... ``` -## Release +## Sign artifacts -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. +To ensure we comply with CNCF best practices, we need to sign the release artifacts. -### Sign the Release Artifact +Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag. +Both archives need to be signed with your GPG key. -To ensure we comply with CNCF best practices, we need to sign the release artifact. -The tarball attached to the GitHub release needs to be signed with your GPG key. +You can use [this script] to verify the contents of the archives before signing them. -Follow [these steps] to sign the release artifact and upload it to GitHub. -You can use [this script] to verify the contents of the tarball before signing it. +To find your GPG key ID, run: -Be sure to use the correct GPG key when signing the release artifact. +```terminal +gpg --list-secret-keys --keyid-format=long +``` + +The key ID is the 16-character string after `sec rsa4096/` (or similar). + +Set environment variables and sign both artifacts: ```terminal -gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz +export VERSION="" # e.g., v1.32.0 +export KEY_ID="" + +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz +gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip ``` -You can verify the signature with: +You can verify the signatures with: ```terminal -gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz +gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz +gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip ``` -[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases [this script]: https://github.com/MrAlias/attest-sh +## Release + +Finally create a Release for the new `` on GitHub. +The release body should include all the release notes from the Changelog for this release. + +***IMPORTANT***: GitHub Releases are immutable once created. +You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later. + ## Post-Release ### Contrib Repository @@ -160,14 +176,6 @@ This helps track what changes were included in each release. Once all related issues and PRs have been added to the milestone, close the milestone. 
-### Demo Repository - -Bump the dependencies in the following Go services: - -- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) - ### Close the `Version Release` issue Once the todo list in the `Version Release` issue is complete, close the issue. diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index b8cb605c..b27c9e84 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -83,7 +83,7 @@ is designed so the following goals can be achieved. in either the module path or the import path. * In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. + alerts and dashboards. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. * Experimental modules still under active development will be versioned at diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 6333d34b..6cc1a165 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -16,7 +16,7 @@ type ( // set into a wire representation. Encoder interface { // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. + // its Iterator. This result may be cached by an attribute.Set. Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go new file mode 100644 index 00000000..6aa69aea --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" +) + +// Type identifiers. These identifiers are hashed before the value of the +// corresponding type. This is done to distinguish values that are hashed with +// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and +// int64(0)). +// +// These are all 8 byte length strings converted to a uint64 representation. A +// uint64 is used instead of the string directly as an optimization, it avoids +// the for loop in [xxhash] which adds minor overhead. +const ( + boolID uint64 = 7953749933313450591 // "_boolean" (little endian) + int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian) + float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian) + stringID uint64 = 6874584755375207263 // "_string_" (little endian) + boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian) + int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian) + float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian) + stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian) +) + +// hashKVs returns a new xxHash64 hash of kvs. 
+func hashKVs(kvs []KeyValue) uint64 { + h := xxhash.New() + for _, kv := range kvs { + h = hashKV(h, kv) + } + return h.Sum64() +} + +// hashKV returns the xxHash64 hash of kv with h as the base. +func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash { + h = h.String(string(kv.Key)) + + switch kv.Value.Type() { + case BOOL: + h = h.Uint64(boolID) + h = h.Uint64(kv.Value.numeric) + case INT64: + h = h.Uint64(int64ID) + h = h.Uint64(kv.Value.numeric) + case FLOAT64: + h = h.Uint64(float64ID) + // Assumes numeric stored with math.Float64bits. + h = h.Uint64(kv.Value.numeric) + case STRING: + h = h.Uint64(stringID) + h = h.String(kv.Value.stringly) + case BOOLSLICE: + h = h.Uint64(boolSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Bool(rv.Index(i).Bool()) + } + case INT64SLICE: + h = h.Uint64(int64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Int64(rv.Index(i).Int()) + } + case FLOAT64SLICE: + h = h.Uint64(float64SliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.Float64(rv.Index(i).Float()) + } + case STRINGSLICE: + h = h.Uint64(stringSliceID) + rv := reflect.ValueOf(kv.Value.slice) + for i := 0; i < rv.Len(); i++ { + h = h.String(rv.Index(i).String()) + } + case INVALID: + default: + // Logging is an alternative, but using the internal logger here + // causes an import cycle so it is not done. + v := kv.Value.AsInterface() + msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v) + panic(msg) + } + return h +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go new file mode 100644 index 00000000..113a9783 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package xxhash provides a wrapper around the xxhash library for attribute hashing. +package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash" + +import ( + "encoding/binary" + "math" + + "github.com/cespare/xxhash/v2" +) + +// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values. +type Hash struct { + d *xxhash.Digest +} + +// New returns a new initialized xxHash64 hasher. +func New() Hash { + return Hash{d: xxhash.New()} +} + +func (h Hash) Uint64(val uint64) Hash { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], val) + // errors from Write are always nil for xxhash + // if it returns an err then panic + _, err := h.d.Write(buf[:]) + if err != nil { + panic("xxhash write of uint64 failed: " + err.Error()) + } + return h +} + +func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function. + if val { + return h.Uint64(1) + } + return h.Uint64(0) +} + +func (h Hash) Float64(val float64) Hash { + return h.Uint64(math.Float64bits(val)) +} + +func (h Hash) Int64(val int64) Hash { + return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing. +} + +func (h Hash) String(val string) Hash { + // errors from WriteString are always nil for xxhash + // if it returns an err then panic + _, err := h.d.WriteString(val) + if err != nil { + panic("xxhash write of string failed: " + err.Error()) + } + return h +} + +// Sum64 returns the current hash value. 
+func (h Hash) Sum64() uint64 { + return h.d.Sum64() +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 64735d38..911d557e 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -9,6 +9,8 @@ import ( "reflect" "slices" "sort" + + "go.opentelemetry.io/otel/attribute/internal/xxhash" ) type ( @@ -23,19 +25,19 @@ type ( // the Equals method to ensure stable equivalence checking. // // Users should also use the Distinct returned from Equivalent as a map key - // instead of a Set directly. In addition to that type providing guarantees - // on stable equivalence, it may also provide performance improvements. + // instead of a Set directly. Set has relatively poor performance when used + // as a map key compared to Distinct. Set struct { - equivalent Distinct + hash uint64 + data any } - // Distinct is a unique identifier of a Set. + // Distinct is an identifier of a Set which is very likely to be unique. // - // Distinct is designed to ensure equivalence stability: comparisons will - // return the same value across versions. For this reason, Distinct should - // always be used as a map key instead of a Set. + // Distinct should be used as a map key instead of a Set for to provide better + // performance for map operations. Distinct struct { - iface any + hash uint64 } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -46,15 +48,34 @@ type ( Sortable []KeyValue ) +// Compile time check these types remain comparable. +var ( + _ = isComparable(Set{}) + _ = isComparable(Distinct{}) +) + +func isComparable[T comparable](t T) T { return t } + var ( // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, + // emptyHash is the hash of an empty set. + emptyHash = xxhash.New().Sum64() + + // userDefinedEmptySet is an empty set. It was mistakenly exposed to users + // as something they can assign to, so it must remain addressable and + // mutable. + // + // This is kept for backwards compatibility, but should not be used in new code. + userDefinedEmptySet = &Set{ + hash: emptyHash, + data: [0]KeyValue{}, + } + + emptySet = Set{ + hash: emptyHash, + data: [0]KeyValue{}, } ) @@ -62,33 +83,35 @@ var ( // // This is a convenience provided for optimized calling utility. func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) + // Continue to return the pointer to the user-defined empty set for + // backwards-compatibility. + // + // New code should not use this, instead use emptySet. + return userDefinedEmptySet } // Valid reports whether this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil +func (d Distinct) Valid() bool { return d.hash != 0 } + +// reflectValue abbreviates reflect.ValueOf(d). +func (l Set) reflectValue() reflect.Value { + return reflect.ValueOf(l.data) } // Len returns the number of attributes in this set. func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return 0 } - return l.equivalent.reflectValue().Len() + return l.reflectValue().Len() } // Get returns the KeyValue at ordered position idx in this set. 
func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return KeyValue{}, false } - value := l.equivalent.reflectValue() + value := l.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) { // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { - if l == nil || !l.equivalent.Valid() { + if l == nil || l.hash == 0 { return Value{}, false } - rValue := l.equivalent.reflectValue() + rValue := l.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue { return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any +// Equivalent returns a value that may be used as a map key. Equal Distinct +// values are very likely to be equivalent attribute Sets. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent + if l == nil || l.hash == 0 { + return Distinct{hash: emptySet.hash} } - return l.equivalent + return Distinct{hash: l.hash} } // Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() + if l.Equivalent() != o.Equivalent() { + return false + } + if l == nil || l.hash == 0 { + l = &emptySet + } + if o == nil || o.hash == 0 { + o = &emptySet + } + return l.data == o.data } // Encoded returns the encoded form of this set, according to encoder. @@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string { return encoder.Encode(l.Iter()) } -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // @@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { - return empty(), nil + return emptySet, nil } // Stable sort so the following de-duplication can implement @@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if filter != nil { if div := filteredToFront(kvs, filter); div != 0 { - return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + return newSet(kvs[div:]), kvs[:div] } } - return Set{equivalent: computeDistinct(kvs)}, nil + return newSet(kvs), nil } // NewSetWithSortableFiltered returns a new Set. @@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { if first == 0 { // It is safe to assume len(slice) >= 1 given we found at least one // attribute above that needs to be filtered out. - return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + return newSet(slice[1:]), slice[:1] } // Move the filtered slice[first] to the front (preserving order). @@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { // Do not re-evaluate re(slice[first+1:]). 
div := filteredToFront(slice[1:first+1], re) + 1 - return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] + return newSet(slice[div:]), slice[:div] } -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) +// newSet returns a new set based on the sorted and uniqued kvs. +func newSet(kvs []KeyValue) Set { + s := Set{ + hash: hashKVs(kvs), + data: computeDataFixed(kvs), } - return Distinct{ - iface: iface, + if s.data == nil { + s.data = computeDataReflect(kvs) } + return s } -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) any { +// computeDataFixed computes a Set data for small slices. It returns nil if the +// input is too large for this code path. +func computeDataFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any { } } -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) any { +// computeDataReflect computes a Set data using reflection, works for any size +// input. +func computeDataReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any { // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) + return json.Marshal(l.data) } // MarshalLog is the marshaling function used by the logging system to represent this Set. diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go index e584b247..24f1fa37 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Type_index)-1 { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Type_name[_Type_index[i]:_Type_index[i+1]] + return _Type_name[_Type_index[idx]:_Type_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index f83a448e..78e98c4c 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // If we couldn't find any valid key character, // it means the key is either empty or invalid. if keyStart == keyEnd { - return + return p, ok } // Skip spaces after the key: " key< >= value ". @@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // A key can have no value, like: " key ". 
ok = true p.key = s[keyStart:keyEnd] - return + return p, ok } // If we have not reached the end and we can't find the '=' delimiter, // it means the property is invalid. if s[index] != keyValueDelimiter[0] { - return + return p, ok } // Attempting to parse the value. @@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) { // we have not reached the end, it means the property is // invalid, something like: " key = value value1". if index != len(s) { - return + return p, ok } // Decode a percent-encoded value. rawVal := s[valueStart:valueEnd] unescapeVal, err := url.PathUnescape(rawVal) if err != nil { - return + return p, ok } value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) @@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { p.hasValue = true p.value = value - return + return p, ok } func skipSpace(s string, offset int) int { diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index a311fbb4..cadb87cc 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python -FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver +FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index adb37b5b..6db969f7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -105,7 +105,7 @@ type delegatedInstrument interface { setDelegate(metric.Meter) } -// instID are the identifying properties of a instrument. +// instID are the identifying properties of an instrument. type instID struct { // name is the name of the stream. name string diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index 1e6473b3..527d9aec 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -11,7 +11,7 @@ import ( // Meter returns a Meter from the global MeterProvider. The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. -// If the name is empty, then a implementation defined default name will be +// If the name is empty, then an implementation defined default name will be // used instead. // // If this is called before a global MeterProvider is registered the returned diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index d9e3b13e..e42dd6e7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -3,7 +3,11 @@ package metric // import "go.opentelemetry.io/otel/metric" -import "go.opentelemetry.io/otel/attribute" +import ( + "slices" + + "go.opentelemetry.io/otel/attribute" +) // MeterConfig contains options for Meters. 
type MeterConfig struct { @@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// WithInstrumentationAttributes adds the instrumentation attributes. +// +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. // -// The passed attributes will be de-duplicated. +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) MeterOption { + if set.Len() == 0 { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + return config + }) + } + return meterOptionFunc(func(config MeterConfig) MeterConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 6692d266..271ab71f 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { } // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled + scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked. // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go new file mode 100644 index 00000000..bfeb73e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. +package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import "strings" + +// Resource is an experimental feature flag that defines if resource detectors +// should be included experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature( + []string{"RESOURCE"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) + +// Observability is an experimental feature flag that determines if SDK +// observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Observability = newFeature( + []string{"OBSERVABILITY", "SELF_OBSERVABILITY"}, + func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false + }, +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go index 68d296cb..13347e56 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -1,48 +1,38 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/x/x.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package x contains support for OTel SDK experimental features. -// -// This package should only be used for features defined in the specification. -// It should not be used for experiments or new project ideas. +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk]. package x // import "go.opentelemetry.io/otel/sdk/internal/x" import ( "os" - "strings" ) -// Resource is an experimental feature flag that defines if resource detectors -// should be included experimental semantic conventions. 
-// -// To enable this feature set the OTEL_GO_X_RESOURCE environment variable -// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" -// will also enable this). -var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { - return v, true - } - return "", false -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { - key string + keys []string parse func(v string) (T, bool) } -func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { +func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" + keys := make([]string, 0, len(suffix)) + for _, s := range suffix { + keys = append(keys, envKeyRoot+s) + } return Feature[T]{ - key: envKeyRoot + suffix, + keys: keys, parse: parse, } } -// Key returns the environment variable key that needs to be set to enable the +// Keys returns the environment variable keys that can be set to enable the // feature. -func (f Feature[T]) Key() string { return f.key } +func (f Feature[T]) Keys() []string { return f.keys } // Lookup returns the user configured value for the feature and true if the // user has enabled the feature. Otherwise, if the feature is not enabled, a @@ -52,14 +42,16 @@ func (f Feature[T]) Lookup() (v T, ok bool) { // // > The SDK MUST interpret an empty value of an environment variable the // > same way as when the variable is unset. - vRaw := os.Getenv(f.key) - if vRaw == "" { - return v, ok + for _, key := range f.keys { + vRaw := os.Getenv(key) + if vRaw != "" { + return f.parse(vRaw) + } } - return f.parse(vRaw) + return v, ok } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE index 261eeb9e..f1aee0f1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index 203cd9d6..c6440a13 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" "os" + "strconv" "strings" "sync" @@ -17,12 +18,15 @@ import ( // config contains configuration options for a MeterProvider. type config struct { - res *resource.Resource - readers []Reader - views []View - exemplarFilter exemplar.Filter + res *resource.Resource + readers []Reader + views []View + exemplarFilter exemplar.Filter + cardinalityLimit int } +const defaultCardinalityLimit = 0 + // readerSignals returns a force-flush and shutdown function for a // MeterProvider to call in their respective options. All Readers c contains // will have their force-flush and shutdown methods unified into returned @@ -69,8 +73,9 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. func newConfig(options []Option) config { conf := config{ - res: resource.Default(), - exemplarFilter: exemplar.TraceBasedFilter, + res: resource.Default(), + exemplarFilter: exemplar.TraceBasedFilter, + cardinalityLimit: cardinalityLimitFromEnv(), } for _, o := range meterProviderOptionsFromEnv() { conf = o.apply(conf) @@ -155,6 +160,21 @@ func WithExemplarFilter(filter exemplar.Filter) Option { }) } +// WithCardinalityLimit sets the cardinality limit for the MeterProvider. +// +// The cardinality limit is the hard limit on the number of metric datapoints +// that can be collected for a single instrument in a single collect cycle. +// +// Setting this to a zero or negative value means no limit is applied. +func WithCardinalityLimit(limit int) Option { + // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT` + // can also be used to set this value. 
+ return optionFunc(func(cfg config) config { + cfg.cardinalityLimit = limit + return cfg + }) +} + func meterProviderOptionsFromEnv() []Option { var opts []Option // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar @@ -170,3 +190,17 @@ func meterProviderOptionsFromEnv() []Option { } return opts } + +func cardinalityLimitFromEnv() int { + const cardinalityLimitKey = "OTEL_GO_X_CARDINALITY_LIMIT" + v := strings.TrimSpace(os.Getenv(cardinalityLimitKey)) + if v == "" { + return defaultCardinalityLimit + } + n, err := strconv.Atoi(v) + if err != nil { + otel.Handle(err) + return defaultCardinalityLimit + } + return n +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 90a4ae16..dd75eefa 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -39,6 +39,46 @@ // Meter.RegisterCallback and Registration.Unregister to add and remove // callbacks without leaking memory. // +// # Cardinality Limits +// +// Cardinality refers to the number of unique attributes collected. High cardinality can lead to +// excessive memory usage, increased storage costs, and backend performance issues. +// +// Currently, the OpenTelemetry Go Metric SDK does not enforce a cardinality limit by default +// (note that this may change in a future release). Use [WithCardinalityLimit] to set the +// cardinality limit as desired. +// +// New attribute sets are dropped when the cardinality limit is reached. The measurement of +// these sets are aggregated into +// a special attribute set containing attribute.Bool("otel.metric.overflow", true). +// This ensures total metric values (e.g., Sum, Count) remain correct for the +// collection cycle, but information about the specific dropped sets +// is not preserved. +// +// Recommendations: +// +// - Set the limit based on the theoretical maximum combinations or expected +// active combinations. The OpenTelemetry Specification recommends a default of 2000. +// - A too high of a limit increases worst-case memory overhead in the SDK and may cause downstream +// issues for databases that cannot handle high cardinality. +// - A too low of a limit causes loss of attribute detail as more data falls into overflow. +// +// # Ordering and Collection Guarantees +// +// For performance reasons, the SDK does not guarantee that the order in which +// synchronous measurements are made to the SDK is reflected in the collected +// metric data. This means that even when a single goroutine makes sequential +// synchronous measurements, it is possible for a later measurement to be +// included in the collected metric data when an earlier measurement is not. +// This applies to measurements made to different instruments, or to different +// attribute sets on the same instrument. Sequential measurements made to the +// same instrument and with the same attributes are guaranteed to preserve +// ordering with respect to collection. +// +// Additionally, the SDK does not guarantee that exemplars are always included +// in the same batch of metric data as the measurement they are associated +// with. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. 
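+//
+// As a minimal sketch (reader is assumed to be a pre-configured [Reader]),
+// the limit is applied when constructing the provider:
+//
+//	provider := metric.NewMeterProvider(
+//		metric.WithReader(reader),
+//		metric.WithCardinalityLimit(2000),
+//	)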
// diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 549d3bd5..38b8745e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -58,10 +58,7 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // SimpleFixedSizeExemplarReservoir with a reservoir equal to the // smaller of the maximum number of buckets configured on the // aggregation or twenty (e.g. min(20, max_buckets)). - n = int(a.MaxSize) - if n > 20 { - n = 20 - } + n = min(int(a.MaxSize), 20) } else { // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir // This Exemplar reservoir MAY take a configuration parameter for @@ -69,11 +66,11 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // provided, the default size MAY be the number of possible // concurrent threads (e.g. number of CPUs) to help reduce // contention. Otherwise, a default size of 1 SHOULD be used. - n = runtime.NumCPU() - if n < 1 { - // Should never be the case, but be defensive. - n = 1 - } + // + // Use runtime.GOMAXPROCS instead of runtime.NumCPU to support + // containerized environments that may have less than the total number + // of logical CPUs available on the local machine allocated to it. + n = max(runtime.GOMAXPROCS(0), 1) } return exemplar.FixedSizeReservoirProvider(n) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go index b595e2ac..b50f5c15 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go @@ -24,11 +24,11 @@ func TraceBasedFilter(ctx context.Context) bool { } // AlwaysOnFilter is a [Filter] that always offers measurements. -func AlwaysOnFilter(ctx context.Context) bool { +func AlwaysOnFilter(context.Context) bool { return true } // AlwaysOffFilter is a [Filter] that never offers measurements. -func AlwaysOffFilter(ctx context.Context) bool { +func AlwaysOffFilter(context.Context) bool { return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go index 1fb1e009..453278a0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -7,14 +7,16 @@ import ( "context" "math" "math/rand/v2" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. func FixedSizeReservoirProvider(k int) ReservoirProvider { - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewFixedSizeReservoir(k) } } @@ -34,7 +36,9 @@ var _ Reservoir = &FixedSizeReservoir{} // If there are more than k, the Reservoir will then randomly sample all // additional measurement with a decreasing probability. type FixedSizeReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // count is the number of measurement seen. 
count int64 @@ -56,7 +60,7 @@ func newFixedSizeReservoir(s *storage) *FixedSizeReservoir { // randomFloat64 returns, as a float64, a uniform pseudo-random number in the // open interval (0.0,1.0). -func (r *FixedSizeReservoir) randomFloat64() float64 { +func (*FixedSizeReservoir) randomFloat64() float64 { // TODO: Use an algorithm that avoids rejection sampling. For example: // // const precision = 1 << 53 // 2^53 @@ -123,15 +127,15 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a // https://github.com/MrAlias/reservoir-sampling for a performance // comparison of reservoir sampling algorithms. - if int(r.count) < cap(r.store) { - r.store[r.count] = newMeasurement(ctx, t, n, a) - } else { - if r.count == r.next { - // Overwrite a random existing measurement with the one offered. - idx := int(rand.Int64N(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) - r.advance() - } + r.mu.Lock() + defer r.mu.Unlock() + if int(r.count) < cap(r.measurements) { + r.store(int(r.count), newMeasurement(ctx, t, n, a)) + } else if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(rand.Int64N(int64(cap(r.measurements)))) + r.store(idx, newMeasurement(ctx, t, n, a)) + r.advance() } r.count++ } @@ -141,7 +145,7 @@ func (r *FixedSizeReservoir) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. - r.next = int64(cap(r.store)) + r.next = int64(cap(r.measurements)) // Initial random number in the series used to generate r.next. // @@ -152,7 +156,7 @@ func (r *FixedSizeReservoir) reset() { // This maps the uniform random number in (0,1) to a geometric distribution // over the same interval. The mean of the distribution is inversely // proportional to the storage capacity. - r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) r.advance() } @@ -172,7 +176,7 @@ func (r *FixedSizeReservoir) advance() { // therefore the next r.w will be based on the same distribution (i.e. // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by // computing the next random number `u` and take r.w as `w * u^(1/k)`. - r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.measurements))) // Use the new random number in the series to calculate the count of the // next measurement that will be stored. // @@ -190,6 +194,8 @@ func (r *FixedSizeReservoir) advance() { // // The Reservoir state is preserved after this call. func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() r.storage.Collect(dest) // Call reset here even though it will reset r.count and restart the random // number series. This will persist any old exemplars as long as no new diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go index 3b76cf30..60c871a4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -7,16 +7,18 @@ import ( "context" "slices" "sort" + "sync" "time" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // HistogramReservoirProvider is a provider of [HistogramReservoir]. 
func HistogramReservoirProvider(bounds []float64) ReservoirProvider { cp := slices.Clone(bounds) slices.Sort(cp) - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewHistogramReservoir(cp) } } @@ -39,7 +41,9 @@ var _ Reservoir = &HistogramReservoir{} // falls within a histogram bucket. The histogram bucket upper-boundaries are // define by bounds. type HistogramReservoir struct { + reservoir.ConcurrentSafe *storage + mu sync.Mutex // bounds are bucket bounds in ascending order. bounds []float64 @@ -57,14 +61,29 @@ type HistogramReservoir struct { // parameters are the value and dropped (filtered) attributes of the // measurement respectively. func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 + var n float64 switch v.Type() { case Int64ValueType: - x = float64(v.Int64()) + n = float64(v.Int64()) case Float64ValueType: - x = v.Float64() + n = v.Float64() default: panic("unknown value type") } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) + + idx := sort.SearchFloat64s(r.bounds, n) + m := newMeasurement(ctx, t, v, a) + + r.mu.Lock() + defer r.mu.Unlock() + r.store(idx, m) +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *HistogramReservoir) Collect(dest *[]Exemplar) { + r.mu.Lock() + defer r.mu.Unlock() + r.storage.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go index 0e2e26df..16b61c07 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go @@ -13,24 +13,28 @@ import ( // storage is an exemplar storage for [Reservoir] implementations. type storage struct { - // store are the measurements sampled. + // measurements are the measurements sampled. // // This does not use []metricdata.Exemplar because it potentially would // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement + measurements []measurement } func newStorage(n int) *storage { - return &storage{store: make([]measurement, n)} + return &storage{measurements: make([]measurement, n)} +} + +func (r *storage) store(idx int, m measurement) { + r.measurements[idx] = m } // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. func (r *storage) Collect(dest *[]Exemplar) { - *dest = reset(*dest, len(r.store), len(r.store)) + *dest = reset(*dest, len(r.measurements), len(r.measurements)) var n int - for _, m := range r.store { + for _, m := range r.measurements { if !m.valid { continue } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index 18891ed5..63cccc50 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -75,7 +75,7 @@ type Instrument struct { nonComparable // nolint: unused } -// IsEmpty returns if all Instrument fields are their zero-value. +// IsEmpty reports whether all Instrument fields are their zero-value. 
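The rewritten HistogramReservoir.Offer above resolves the bucket index with sort.SearchFloat64s before taking the lock and storing the measurement. A small sketch with arbitrary example bounds showing how values map onto the N+1 upper-bound-inclusive buckets (which is also why HistogramReservoirProvider clones and sorts the user-supplied slice):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Example bounds; SearchFloat64s requires a sorted slice.
	bounds := []float64{0, 5, 10}

	// SearchFloat64s returns the smallest index i with bounds[i] >= v, so the
	// buckets are (-inf, 0], (0, 5], (5, 10], (10, +inf).
	for _, v := range []float64{-1, 3, 5, 7, 12} {
		fmt.Printf("value %v -> bucket index %d\n", v, sort.SearchFloat64s(bounds, v))
	}
	// value -1 -> 0, value 3 -> 1, value 5 -> 1, value 7 -> 2, value 12 -> 3
}
```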
func (i Instrument) IsEmpty() bool { return i.Name == "" && i.Description == "" && @@ -204,7 +204,7 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record i.aggregate(ctx, val, c.Attributes()) } -func (i *int64Inst) Enabled(_ context.Context) bool { +func (i *int64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } @@ -245,7 +245,7 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re i.aggregate(ctx, val, c.Attributes()) } -func (i *float64Inst) Enabled(_ context.Context) bool { +func (i *float64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go index 25ea6244..e0558cb6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -23,8 +23,9 @@ const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogr var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} func (i InstrumentKind) String() string { - if i >= InstrumentKind(len(_InstrumentKind_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_InstrumentKind_index)-1 { return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] + return _InstrumentKind_name[_InstrumentKind_index[idx]:_InstrumentKind_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index 0321da68..2b604108 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -110,12 +110,13 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati // Sum returns a sum aggregate function input and output. func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { - s := newSum[N](monotonic, b.AggregationLimit, b.resFunc()) switch b.Temporality { case metricdata.DeltaTemporality: - return b.filter(s.measure), s.delta + s := newDeltaSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect default: - return b.filter(s.measure), s.cumulative + s := newCumulativeSum[N](monotonic, b.AggregationLimit, b.resFunc()) + return b.filter(s.measure), s.collect } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go new file mode 100644 index 00000000..0fa6d3c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "math" + "runtime" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" +) + +// atomicCounter is an efficient way of adding to a number which is either an +// int64 or float64. It is designed to be efficient when adding whole +// numbers, regardless of whether N is an int64 or float64. 
+// +// Inspired by the Prometheus counter implementation: +// https://github.com/prometheus/client_golang/blob/14ccb93091c00f86b85af7753100aa372d63602b/prometheus/counter.go#L108 +type atomicCounter[N int64 | float64] struct { + // nFloatBits contains only the non-integer portion of the counter. + nFloatBits atomic.Uint64 + // nInt contains only the integer portion of the counter. + nInt atomic.Int64 +} + +// load returns the current value. The caller must ensure all calls to add have +// returned prior to calling load. +func (n *atomicCounter[N]) load() N { + fval := math.Float64frombits(n.nFloatBits.Load()) + ival := n.nInt.Load() + return N(fval + float64(ival)) +} + +func (n *atomicCounter[N]) add(value N) { + ival := int64(value) + // This case is where the value is an int, or if it is a whole-numbered float. + if float64(ival) == float64(value) { + n.nInt.Add(ival) + return + } + + // Value must be a float below. + for { + oldBits := n.nFloatBits.Load() + newBits := math.Float64bits(math.Float64frombits(oldBits) + float64(value)) + if n.nFloatBits.CompareAndSwap(oldBits, newBits) { + return + } + } +} + +// hotColdWaitGroup is a synchronization primitive which enables lockless +// writes for concurrent writers and enables a reader to acquire exclusive +// access to a snapshot of state including only completed operations. +// Conceptually, it can be thought of as a "hot" wait group, +// and a "cold" wait group, with the ability for the reader to atomically swap +// the hot and cold wait groups, and wait for the now-cold wait group to +// complete. +// +// Inspired by the prometheus/client_golang histogram implementation: +// https://github.com/prometheus/client_golang/blob/a974e0d45e0aa54c65492559114894314d8a2447/prometheus/histogram.go#L725 +// +// Usage: +// +// var hcwg hotColdWaitGroup +// var data [2]any +// +// func write() { +// hotIdx := hcwg.start() +// defer hcwg.done(hotIdx) +// // modify data without locking +// data[hotIdx].update() +// } +// +// func read() { +// coldIdx := hcwg.swapHotAndWait() +// // read data now that all writes to the cold data have completed. +// data[coldIdx].read() +// } +type hotColdWaitGroup struct { + // startedCountAndHotIdx contains a 63-bit counter in the lower bits, + // and a 1 bit hot index to denote which of the two data-points new + // measurements to write to. These are contained together so that read() + // can atomically swap the hot bit, reset the started writes to zero, and + // read the number writes that were started prior to the hot bit being + // swapped. + startedCountAndHotIdx atomic.Uint64 + // endedCounts is the number of writes that have completed to each + // dataPoint. + endedCounts [2]atomic.Uint64 +} + +// start returns the hot index that the writer should write to. The returned +// hot index is 0 or 1. The caller must call done(hot index) after it finishes +// its operation. start() is safe to call concurrently with other methods. +func (l *hotColdWaitGroup) start() uint64 { + // We increment h.startedCountAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to return the currently-hot index. + return l.startedCountAndHotIdx.Add(1) >> 63 +} + +// done signals to the reader that an operation has fully completed. +// done is safe to call concurrently. 
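atomicCounter.add above takes a lock-free integer fast path whenever the offered value survives a round-trip through int64, and otherwise falls back to a compare-and-swap loop on the raw float bits; load later sums the integer and fractional parts. A standalone sketch of that fast-path check (it does not use the internal type itself):

```go
package main

import "fmt"

// wholeNumber reports whether v can take the atomic integer fast path, i.e.
// whether converting it to int64 and back loses nothing.
func wholeNumber(v float64) bool {
	return float64(int64(v)) == v
}

func main() {
	fmt.Println(wholeNumber(3))   // true  -> added via atomic.Int64
	fmt.Println(wholeNumber(3.5)) // false -> added via the CAS loop on the float bits
	fmt.Println(wholeNumber(-42)) // true
}
```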
+func (l *hotColdWaitGroup) done(hotIdx uint64) { + l.endedCounts[hotIdx].Add(1) +} + +// swapHotAndWait swaps the hot bit, waits for all start() calls to be done(), +// and then returns the now-cold index for the reader to read from. The +// returned index is 0 or 1. swapHotAndWait must not be called concurrently. +func (l *hotColdWaitGroup) swapHotAndWait() uint64 { + n := l.startedCountAndHotIdx.Load() + coldIdx := (^n) >> 63 + // Swap the hot and cold index while resetting the started measurements + // count to zero. + n = l.startedCountAndHotIdx.Swap((coldIdx << 63)) + hotIdx := n >> 63 + startedCount := n & ((1 << 63) - 1) + // Wait for all measurements to the previously-hot map to finish. + for startedCount != l.endedCounts[hotIdx].Load() { + runtime.Gosched() // Let measurements complete. + } + // reset the number of ended operations + l.endedCounts[hotIdx].Store(0) + return hotIdx +} + +// limitedSyncMap is a sync.Map which enforces the aggregation limit on +// attribute sets and provides a Len() function. +type limitedSyncMap struct { + sync.Map + aggLimit int + len int + lenMux sync.Mutex +} + +func (m *limitedSyncMap) LoadOrStoreAttr(fltrAttr attribute.Set, newValue func(attribute.Set) any) any { + actual, loaded := m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + // If the overflow set exists, assume we have already overflowed and don't + // bother with the slow path below. + actual, loaded = m.Load(overflowSet.Equivalent()) + if loaded { + return actual + } + // Slow path: add a new attribute set. + m.lenMux.Lock() + defer m.lenMux.Unlock() + + // re-fetch now that we hold the lock to ensure we don't use the overflow + // set unless we are sure the attribute set isn't being written + // concurrently. + actual, loaded = m.Load(fltrAttr.Equivalent()) + if loaded { + return actual + } + + if m.aggLimit > 0 && m.len >= m.aggLimit-1 { + fltrAttr = overflowSet + } + actual, loaded = m.LoadOrStore(fltrAttr.Equivalent(), newValue(fltrAttr)) + if !loaded { + m.len++ + } + return actual +} + +func (m *limitedSyncMap) Clear() { + m.lenMux.Lock() + defer m.lenMux.Unlock() + m.len = 0 + m.Map.Clear() +} + +func (m *limitedSyncMap) Len() int { + m.lenMux.Lock() + defer m.lenMux.Unlock() + return m.len +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go index 8396faaa..129920cb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go @@ -18,10 +18,10 @@ func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N type dropRes[N int64 | float64] struct{} // Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} +func (*dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. 
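limitedSyncMap.LoadOrStoreAttr above enforces the aggregation cardinality limit the same way the map-based limiter does: once limit-1 distinct attribute sets are tracked, new sets collapse into a single overflow series. A hedged sketch of that decision logic; the otel.metric.overflow key mirrors the SDK's overflow marker, but treat the exact attribute as an assumption for illustration.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// overflowSet stands in for the SDK's overflow series marker (assumed key).
var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))

// resolve returns the attribute set a measurement should aggregate under,
// given the sets already tracked and a limit (<= 0 disables the limit).
func resolve(attrs attribute.Set, seen map[attribute.Distinct]bool, limit int) attribute.Set {
	if limit > 0 && !seen[attrs.Equivalent()] && len(seen) >= limit-1 {
		return overflowSet
	}
	return attrs
}

func main() {
	seen := map[attribute.Distinct]bool{}
	limit := 3
	for _, host := range []string{"a", "b", "c", "d"} {
		attrs := attribute.NewSet(attribute.String("host", host))
		out := resolve(attrs, seen, limit)
		seen[out.Equivalent()] = true
		fmt.Println(host, "->", out.Encoded(attribute.DefaultEncoder()))
	}
	// Hosts "a" and "b" keep their own series; "c" and "d" collapse into the
	// overflow series because the limit reserves one slot for it.
}
```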
-func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { +func (*dropRes[N]) Collect(dest *[]exemplar.Exemplar) { clear(*dest) // Erase elements to let GC collect objects *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index ae1f5934..5b3a19c0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -183,8 +183,8 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) var count int32 for high-low >= p.maxSize { - low = low >> 1 - high = high >> 1 + low >>= 1 + high >>= 1 count++ if count > expoMaxScale-expoMinScale { return count @@ -225,7 +225,7 @@ func (b *expoBuckets) record(bin int32) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts) b.counts = b.counts[:newLength] for i := 1; i < int(shift); i++ { b.counts[i] = 0 @@ -264,7 +264,7 @@ func (b *expoBuckets) downscale(delta int32) { // new Counts: [4, 14, 30, 10] if len(b.counts) <= 1 || delta < 1 { - b.startBin = b.startBin >> delta + b.startBin >>= delta return } @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int32) { lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] - b.startBin = b.startBin >> delta + b.startBin >>= delta } // newExponentialHistogram returns an Aggregator that summarizes a set of @@ -301,7 +301,7 @@ func newExponentialHistogram[N int64 | float64]( maxScale: maxScale, newRes: r, - limit: newLimiter[*expoHistogramDataPoint[N]](limit), + limit: newLimiter[expoHistogramDataPoint[N]](limit), values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]), start: now(), @@ -317,7 +317,7 @@ type expoHistogram[N int64 | float64] struct { maxScale int32 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*expoHistogramDataPoint[N]] + limit limiter[expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -338,19 +338,26 @@ func (e *expoHistogram[N]) measure( e.valuesMu.Lock() defer e.valuesMu.Unlock() - attr := e.limit.Attributes(fltrAttr, e.values) - v, ok := e.values[attr.Equivalent()] + v, ok := e.values[fltrAttr.Equivalent()] if !ok { - v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes(attr) - - e.values[attr.Equivalent()] = v + fltrAttr = e.limit.Attributes(fltrAttr, e.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. + v, ok = e.values[fltrAttr.Equivalent()] + if !ok { + v = newExpoHistogramDataPoint[N](fltrAttr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) + v.res = e.newRes(fltrAttr) + + e.values[fltrAttr.Equivalent()] = v + } } v.record(value) v.res.Offer(ctx, value, droppedAttr) } -func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. 
@@ -411,7 +418,9 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go index d4c41642..e4f9409b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -5,10 +5,12 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" + "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" ) // FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter. @@ -29,6 +31,11 @@ type FilteredExemplarReservoir[N int64 | float64] interface { type filteredExemplarReservoir[N int64 | float64] struct { filter exemplar.Filter reservoir exemplar.Reservoir + // The exemplar.Reservoir is not required to be concurrent safe, but + // implementations can indicate that they are concurrent-safe by embedding + // reservoir.ConcurrentSafe in order to improve performance. + reservoirMux sync.Mutex + concurrentSafe bool } // NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values @@ -37,17 +44,30 @@ func NewFilteredExemplarReservoir[N int64 | float64]( f exemplar.Filter, r exemplar.Reservoir, ) FilteredExemplarReservoir[N] { + _, concurrentSafe := r.(reservoir.ConcurrentSafe) return &filteredExemplarReservoir[N]{ - filter: f, - reservoir: r, + filter: f, + reservoir: r, + concurrentSafe: concurrentSafe, } } func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { if f.filter(ctx) { // only record the current time if we are sampling this measurement. 
- f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr) + ts := time.Now() + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Offer(ctx, ts, exemplar.NewValue(val), attr) } } -func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) } +func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { + if !f.concurrentSafe { + f.reservoirMux.Lock() + defer f.reservoirMux.Unlock() + } + f.reservoir.Collect(dest) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index d3068484..a094519c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -31,9 +31,12 @@ func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] { func (b *buckets[N]) sum(value N) { b.total += value } -func (b *buckets[N]) bin(idx int, value N) { +func (b *buckets[N]) bin(idx int) { b.counts[idx]++ b.count++ +} + +func (b *buckets[N]) minMax(value N) { if value < b.min { b.min = value } else if value > b.max { @@ -44,18 +47,19 @@ func (b *buckets[N]) bin(idx int, value N) { // histValues summarizes a set of measurements as an histValues with // explicitly defined buckets. type histValues[N int64 | float64] struct { - noSum bool - bounds []float64 + noMinMax bool + noSum bool + bounds []float64 newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[*buckets[N]] + limit limiter[buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } func newHistValues[N int64 | float64]( bounds []float64, - noSum bool, + noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N], ) *histValues[N] { @@ -66,11 +70,12 @@ func newHistValues[N int64 | float64]( b := slices.Clone(bounds) slices.Sort(b) return &histValues[N]{ - noSum: noSum, - bounds: b, - newRes: r, - limit: newLimiter[*buckets[N]](limit), - values: make(map[attribute.Distinct]*buckets[N]), + noMinMax: noMinMax, + noSum: noSum, + bounds: b, + newRes: r, + limit: newLimiter[buckets[N]](limit), + values: make(map[attribute.Distinct]*buckets[N]), } } @@ -92,24 +97,32 @@ func (s *histValues[N]) measure( s.valuesMu.Lock() defer s.valuesMu.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - b, ok := s.values[attr.Equivalent()] + b, ok := s.values[fltrAttr.Equivalent()] if !ok { - // N+1 buckets. For example: - // - // bounds = [0, 5, 10] - // - // Then, - // - // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) - b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes(attr) - - // Ensure min and max are recorded values (not zero), for new buckets. - b.min, b.max = value, value - s.values[attr.Equivalent()] = b + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + // If we overflowed, make sure we add to the existing overflow series + // if it already exists. + b, ok = s.values[fltrAttr.Equivalent()] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets[N](fltrAttr, len(s.bounds)+1) + b.res = s.newRes(fltrAttr) + + // Ensure min and max are recorded values (not zero), for new buckets. 
+ b.min, b.max = value, value + s.values[fltrAttr.Equivalent()] = b + } + } + b.bin(idx) + if !s.noMinMax { + b.minMax(value) } - b.bin(idx, value) if !s.noSum { b.sum(value) } @@ -125,8 +138,7 @@ func newHistogram[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *histogram[N] { return &histogram[N]{ - histValues: newHistValues[N](boundaries, noSum, limit, r), - noMinMax: noMinMax, + histValues: newHistValues[N](boundaries, noMinMax, noSum, limit, r), start: now(), } } @@ -136,11 +148,12 @@ func newHistogram[N int64 | float64]( type histogram[N int64 | float64] struct { *histValues[N] - noMinMax bool - start time.Time + start time.Time } -func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { +func (s *histogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that @@ -190,7 +203,9 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *histogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 350ccebd..3e2ed741 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -23,7 +23,7 @@ func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredEx return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), - values: make(map[attribute.Distinct]datapoint[N]), + values: make(map[attribute.Distinct]*datapoint[N]), start: now(), } } @@ -34,7 +34,7 @@ type lastValue[N int64 | float64] struct { newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] - values map[attribute.Distinct]datapoint[N] + values map[attribute.Distinct]*datapoint[N] start time.Time } @@ -42,20 +42,24 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.Lock() defer s.Unlock() - attr := s.limit.Attributes(fltrAttr, s.values) - d, ok := s.values[attr.Equivalent()] + d, ok := s.values[fltrAttr.Equivalent()] if !ok { - d.res = s.newRes(attr) + fltrAttr = s.limit.Attributes(fltrAttr, s.values) + d = &datapoint[N]{ + res: s.newRes(fltrAttr), + attrs: fltrAttr, + } } - d.attrs = attr d.value = value d.res.Offer(ctx, value, droppedAttr) - s.values[attr.Equivalent()] = d + s.values[fltrAttr.Equivalent()] = d } -func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). 
@@ -75,7 +79,9 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -126,7 +132,9 @@ type precomputedLastValue[N int64 | float64] struct { *lastValue[N] } -func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -146,7 +154,9 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go index 9ea0251e..c19a1aff 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go @@ -30,7 +30,7 @@ func newLimiter[V any](aggregation int) limiter[V] { // aggregation cardinality limit for the existing measurements. If it will, // overflowSet is returned. Otherwise, if it will not exceed the limit, or the // limit is not set (limit <= 0), attr is returned. -func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set { +func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]*V) attribute.Set { if l.aggLimit > 0 { _, exists := measurements[attrs.Equivalent()] if !exists && len(measurements) >= l.aggLimit-1 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 612cde43..81690855 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -5,7 +5,6 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" - "sync" "time" "go.opentelemetry.io/otel/attribute" @@ -13,64 +12,77 @@ import ( ) type sumValue[N int64 | float64] struct { - n N + n atomicCounter[N] res FilteredExemplarReservoir[N] attrs attribute.Set } -// valueMap is the storage for sums. 
type valueMap[N int64 | float64] struct { - sync.Mutex + values limitedSyncMap newRes func(attribute.Set) FilteredExemplarReservoir[N] - limit limiter[sumValue[N]] - values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { - return &valueMap[N]{ - newRes: r, - limit: newLimiter[sumValue[N]](limit), - values: make(map[attribute.Distinct]sumValue[N]), - } -} - -func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { - s.Lock() - defer s.Unlock() - - attr := s.limit.Attributes(fltrAttr, s.values) - v, ok := s.values[attr.Equivalent()] - if !ok { - v.res = s.newRes(attr) - } - - v.attrs = attr - v.n += value - v.res.Offer(ctx, value, droppedAttr) - - s.values[attr.Equivalent()] = v +func (s *valueMap[N]) measure( + ctx context.Context, + value N, + fltrAttr attribute.Set, + droppedAttr []attribute.KeyValue, +) { + sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any { + return &sumValue[N]{ + res: s.newRes(attr), + attrs: attr, + } + }).(*sumValue[N]) + sv.n.add(value) + // It is possible for collection to race with measurement and observe the + // exemplar in the batch of metrics after the add() for cumulative sums. + // This is an accepted tradeoff to avoid locking during measurement. + sv.res.Offer(ctx, value, droppedAttr) } -// newSum returns an aggregator that summarizes a set of measurements as their -// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle -// the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { - return &sum[N]{ - valueMap: newValueMap[N](limit, r), +// newDeltaSum returns an aggregator that summarizes a set of measurements as +// their arithmetic sum. Each sum is scoped by attributes and the aggregation +// cycle the measurements were made in. +func newDeltaSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *deltaSum[N] { + return &deltaSum[N]{ monotonic: monotonic, start: now(), + hotColdValMap: [2]valueMap[N]{ + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + { + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + }, } } -// sum summarizes a set of measurements made as their arithmetic sum. -type sum[N int64 | float64] struct { - *valueMap[N] - +// deltaSum is the storage for sums which resets every collection interval. +type deltaSum[N int64 | float64] struct { monotonic bool start time.Time + + hcwg hotColdWaitGroup + hotColdValMap [2]valueMap[N] +} + +func (s *deltaSum[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { + hotIdx := s.hcwg.start() + defer s.hcwg.done(hotIdx) + s.hotColdValMap[hotIdx].measure(ctx, value, fltrAttr, droppedAttr) } -func (s *sum[N]) delta(dest *metricdata.Aggregation) int { +func (s *deltaSum[N]) collect( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. 
In that case, @@ -79,33 +91,63 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // delta always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for _, val := range s.values { + s.hotColdValMap[readIdx].values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = val.n - collectExemplars(&dPts[i].Exemplars, val.res.Collect) + dPts[i].Value = val.n.load() i++ - } - // Do not report stale values. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() // The delta collection cycle resets. s.start = t sData.DataPoints = dPts *dest = sData - return n + return i +} + +// newCumulativeSum returns an aggregator that summarizes a set of measurements +// as their arithmetic sum. Each sum is scoped by attributes and the +// aggregation cycle the measurements were made in. +func newCumulativeSum[N int64 | float64]( + monotonic bool, + limit int, + r func(attribute.Set) FilteredExemplarReservoir[N], +) *cumulativeSum[N] { + return &cumulativeSum[N]{ + monotonic: monotonic, + start: now(), + valueMap: valueMap[N]{ + values: limitedSyncMap{aggLimit: limit}, + newRes: r, + }, + } } -func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { +// deltaSum is the storage for sums which never reset. +type cumulativeSum[N int64 | float64] struct { + monotonic bool + start time.Time + + valueMap[N] +} + +func (s *cumulativeSum[N]) collect( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, @@ -114,30 +156,33 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) - dPts := reset(sData.DataPoints, n, n) + // Values are being concurrently written while we iterate, so only use the + // current length for capacity. + dPts := reset(sData.DataPoints, 0, s.values.Len()) var i int - for _, value := range s.values { - dPts[i].Attributes = value.attrs - dPts[i].StartTime = s.start - dPts[i].Time = t - dPts[i].Value = value.n - collectExemplars(&dPts[i].Exemplars, value.res.Collect) + s.values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + newPt := metricdata.DataPoint[N]{ + Attributes: val.attrs, + StartTime: s.start, + Time: t, + Value: val.n.load(), + } + collectExemplars(&newPt.Exemplars, val.res.Collect) + dPts = append(dPts, newPt) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. 
i++ - } + return true + }) sData.DataPoints = dPts *dest = sData - return n + return i } // newPrecomputedSum returns an aggregator that summarizes a set of @@ -149,25 +194,22 @@ func newPrecomputedSum[N int64 | float64]( r func(attribute.Set) FilteredExemplarReservoir[N], ) *precomputedSum[N] { return &precomputedSum[N]{ - valueMap: newValueMap[N](limit, r), - monotonic: monotonic, - start: now(), + deltaSum: newDeltaSum(monotonic, limit, r), } } // precomputedSum summarizes a set of observations as their arithmetic sum. type precomputedSum[N int64 | float64] struct { - *valueMap[N] + *deltaSum[N] - monotonic bool - start time.Time - - reported map[attribute.Distinct]N + reported map[any]N } -func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() - newReported := make(map[attribute.Distinct]N) + newReported := make(map[any]N) // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. @@ -175,27 +217,29 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // delta always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. + n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for key, value := range s.values { - delta := value.n - s.reported[key] + s.hotColdValMap[readIdx].values.Range(func(key, value any) bool { + val := value.(*sumValue[N]) + n := val.n.load() - dPts[i].Attributes = value.attrs + delta := n - s.reported[key] + collectExemplars(&dPts[i].Exemplars, val.res.Collect) + dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta - collectExemplars(&dPts[i].Exemplars, value.res.Collect) - - newReported[key] = value.n + newReported[key] = n i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() s.reported = newReported // The delta collection cycle resets. s.start = t @@ -203,10 +247,12 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { sData.DataPoints = dPts *dest = sData - return n + return i } -func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, @@ -215,27 +261,28 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic - s.Lock() - defer s.Unlock() - - n := len(s.values) + // cumulative precomputed always clears values on collection + readIdx := s.hcwg.swapHotAndWait() + // The len will not change while we iterate over values, since we waited + // for all writes to finish to the cold values and len. 
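precomputedSum.delta above reports, per attribute set, the change since the previous collection by remembering the last observed cumulative value; it also rebuilds the reported map each cycle so stale series drop out, which the sketch below omits. A small numeric illustration of that bookkeeping:

```go
package main

import "fmt"

// reportDelta mimics the per-series bookkeeping: given the latest observed
// cumulative value, emit the difference from the last report.
func reportDelta(reported map[string]float64, series string, observed float64) float64 {
	delta := observed - reported[series]
	reported[series] = observed
	return delta
}

func main() {
	reported := map[string]float64{}
	// An observable counter callback reports 10, then 25, then 25 again.
	for _, v := range []float64{10, 25, 25} {
		fmt.Println(reportDelta(reported, `{"queue":"jobs"}`, v))
	}
	// Output: 10, 15, 0
}
```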
+ n := s.hotColdValMap[readIdx].values.Len() dPts := reset(sData.DataPoints, n, n) var i int - for _, val := range s.values { + s.hotColdValMap[readIdx].values.Range(func(_, value any) bool { + val := value.(*sumValue[N]) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) dPts[i].Attributes = val.attrs dPts[i].StartTime = s.start dPts[i].Time = t - dPts[i].Value = val.n - collectExemplars(&dPts[i].Exemplars, val.res.Collect) - + dPts[i].Value = val.n.load() i++ - } - // Unused attribute sets do not report. - clear(s.values) + return true + }) + s.hotColdValMap[readIdx].values.Clear() sData.DataPoints = dPts *dest = sData - return n + return i } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go new file mode 100644 index 00000000..41cfc6bc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go @@ -0,0 +1,168 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides experimental observability instrumentation for the +// metric reader. +package observ // import "go.opentelemetry.io/otel/sdk/metric/internal/observ" + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the unique name of the meter used for instrumentation. + ScopeName = "go.opentelemetry.io/otel/sdk/metric/internal/observ" + + // SchemaURL is the schema URL of the metrics produced by this + // instrumentation. + SchemaURL = semconv.SchemaURL +) + +var ( + measureAttrsPool = &sync.Pool{ + New: func() any { + const n = 1 + // component.name + 1 + // component.type + 1 // error.type + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + +func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) } + +func put[T any](p *sync.Pool, s *[]T) { + *s = (*s)[:0] // Reset. + p.Put(s) +} + +// ComponentName returns the component name for the metric reader with the +// provided ComponentType and ID. +func ComponentName(componentType string, id int64) string { + return fmt.Sprintf("%s/%d", componentType, id) +} + +// Instrumentation is experimental instrumentation for the metric reader. +type Instrumentation struct { + colDuration metric.Float64Histogram + + attrs []attribute.KeyValue + recOpt metric.RecordOption +} + +// NewInstrumentation returns instrumentation for metric reader with the provided component +// type (such as periodic and manual metric reader) and ID. It uses the global +// MeterProvider to create the instrumentation. +// +// The id should be the unique metric reader instance ID. It is used +// to set the "component.name" attribute. +// +// If the experimental observability is disabled, nil is returned. 
+func NewInstrumentation(componentType string, id int64) (*Instrumentation, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + i := &Instrumentation{ + attrs: []attribute.KeyValue{ + semconv.OTelComponentName(ComponentName(componentType, id)), + semconv.OTelComponentTypeKey.String(componentType), + }, + } + + r := attribute.NewSet(i.attrs...) + i.recOpt = metric.WithAttributeSet(r) + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + colDuration, err := otelconv.NewSDKMetricReaderCollectionDuration(meter) + if err != nil { + err = fmt.Errorf("failed to create collection duration metric: %w", err) + } + i.colDuration = colDuration.Inst() + + return i, err +} + +// CollectMetrics instruments the collect method of metric reader. It returns an +// [CollectOp] that must have its [CollectOp.End] method called when the +// collection end. +func (i *Instrumentation) CollectMetrics(ctx context.Context) CollectOp { + start := time.Now() + + return CollectOp{ + ctx: ctx, + start: start, + inst: i, + } +} + +// CollectOp tracks the collect operation being observed by +// [Instrumentation.CollectMetrics]. +type CollectOp struct { + ctx context.Context + start time.Time + + inst *Instrumentation +} + +// End completes the observation of the operation being observed by a call to +// [Instrumentation.CollectMetrics]. +// +// Any error that is encountered is provided as err. +func (e CollectOp) End(err error) { + recOpt := get[metric.RecordOption](recordOptPool) + defer put(recordOptPool, recOpt) + *recOpt = append(*recOpt, e.inst.recordOption(err)) + + d := time.Since(e.start).Seconds() + e.inst.colDuration.Record(e.ctx, d, *recOpt...) +} + +// recordOption returns a RecordOption with attributes representing the +// outcome of the collection being recorded. +// +// If err is nil, the default recOpt of the Instrumentation is returned. +// +// Otherwise, a new RecordOption is returned with the base attributes of the +// Instrumentation plus the error.type attribute set to the type of the error. +func (i *Instrumentation) recordOption(err error) metric.RecordOption { + if err == nil { + return i.recOpt + } + + attrs := get[attribute.KeyValue](measureAttrsPool) + defer put(measureAttrsPool, attrs) + *attrs = append(*attrs, i.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using WithAttributes + // instead of WithAttributeSet. + return metric.WithAttributeSet(attribute.NewSet(*attrs...)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go new file mode 100644 index 00000000..3be234a4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" + +// ConcurrentSafe is an interface that can be embedded in an +// exemplar.Reservoir to indicate to the SDK that it is safe to invoke its +// methods concurrently. If this interface is not embedded, the SDK assumes it +// is not safe to call concurrently and locks around Reservoir methods. This +// is currently only used by the built-in reservoirs. 
+type ConcurrentSafe interface{ concurrentSafe() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go new file mode 100644 index 00000000..6cd213b5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package reservoir contains experimental features used by built-in exemplar +// reservoirs which require coordination with the metrics SDK. +package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md index 59f736b7..be0714a5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -1,47 +1,16 @@ # Experimental Features -The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. -These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. +The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. These feature may change in backwards incompatible ways as feedback is applied. See the [Compatibility and Stability](#compatibility-and-stability) section for more information. ## Features -- [Cardinality Limit](#cardinality-limit) - [Exemplars](#exemplars) - [Instrument Enabled](#instrument-enabled) -### Cardinality Limit - -The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument. - -This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value. -The value must be an integer value. -All other values are ignored. - -If the value set is less than or equal to `0`, no limit will be applied. - -#### Examples - -Set the cardinality limit to 2000. - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=2000 -``` - -Set an infinite cardinality limit (functionally equivalent to disabling the feature). - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=-1 -``` - -Disable the cardinality limit. - -```console -unset OTEL_GO_X_CARDINALITY_LIMIT -``` - ### Exemplars A sample of measurements made may be exported directly as a set of exemplars. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index a9860623..294dcf84 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -10,25 +10,8 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( "context" "os" - "strconv" ) -// CardinalityLimit is an experimental feature flag that defines if -// cardinality limits should be applied to the recorded metric data-points. -// -// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment -// variable to the integer limit value you want to use. -// -// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 -// will disable the cardinality limits. 
-var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { @@ -36,6 +19,7 @@ type Feature[T any] struct { parse func(v string) (T, bool) } +//nolint:unused func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" return Feature[T]{ @@ -63,7 +47,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok @@ -73,7 +57,7 @@ func (f Feature[T]) Enabled() bool { // // EnabledInstrument interface is implemented by synchronous instruments. type EnabledInstrument interface { - // Enabled returns whether the instrument will process measurements for the given context. + // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index 96e77908..5b063020 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -10,10 +10,18 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) +const ( + // ManualReaderType uniquely identifies the OpenTelemetry Metric Reader component + // being instrumented. + manualReaderType = "go.opentelemetry.io/otel/sdk/metric/metric.ManualReader" +) + // ManualReader is a simple Reader that allows an application to // read metrics on demand. type ManualReader struct { @@ -26,6 +34,8 @@ type ManualReader struct { temporalitySelector TemporalitySelector aggregationSelector AggregationSelector + + inst *observ.Instrumentation } // Compile time check the manualReader implements Reader and is comparable. @@ -39,9 +49,24 @@ func NewManualReader(opts ...ManualReaderOption) *ManualReader { aggregationSelector: cfg.aggregationSelector, } r.externalProducers.Store(cfg.producers) + + var err error + r.inst, err = observ.NewInstrumentation(manualReaderType, nextManualReaderID()) + if err != nil { + otel.Handle(err) + } + return r } +var manualReaderIDCounter atomic.Int64 + +// nextManualReaderID returns an identifier for this manual reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextManualReaderID() int64 { + return manualReaderIDCounter.Add(1) - 1 +} + // register stores the sdkProducer which enables the caller // to read metrics from the SDK on demand. func (mr *ManualReader) register(p sdkProducer) { @@ -93,12 +118,20 @@ func (mr *ManualReader) Shutdown(context.Context) error { // // This method is safe to call concurrently. 
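With the OTEL_GO_X_CARDINALITY_LIMIT experimental flag removed above, the limit now flows from provider configuration into each pipeline (see the conf.cardinalityLimit wiring in pipeline.go and provider.go further down). A hedged sketch of setting it at construction time; WithCardinalityLimit is an assumed option name inferred from that wiring, not an API confirmed by this diff.

```go
package main

import (
	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reader := sdkmetric.NewManualReader()

	// Assumed option name: the diff only shows the limit arriving via the
	// provider config (conf.cardinalityLimit) rather than an env var.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithCardinalityLimit(2000),
	)
	otel.SetMeterProvider(provider)
}
```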
func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { + var err error + if mr.inst != nil { + cp := mr.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if rm == nil { - return errors.New("manual reader: *metricdata.ResourceMetrics is nil") + err = errors.New("manual reader: *metricdata.ResourceMetrics is nil") + return err } p := mr.sdkProducer.Load() if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -107,11 +140,11 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. - err := fmt.Errorf("manual reader: invalid producer: %T", p) + err = fmt.Errorf("manual reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } @@ -129,7 +162,7 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr } // MarshalLog returns logging data about the ManualReader. -func (r *ManualReader) MarshalLog() interface{} { +func (r *ManualReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index c500fd9f..e0a1e90e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) @@ -423,7 +422,7 @@ func (m *meter) Float64ObservableGauge( } func validateInstrumentName(name string) error { - if len(name) == 0 { + if name == "" { return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) } if len(name) > 255 { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go index 4da833cd..129cc643 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go @@ -18,8 +18,9 @@ const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTempora var _Temporality_index = [...]uint8{0, 20, 41, 57} func (i Temporality) String() string { - if i >= Temporality(len(_Temporality_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Temporality_index)-1 { return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] + return _Temporality_name[_Temporality_index[idx]:_Temporality_index[idx+1]] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 0a48aed7..e78402af 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -13,7 +13,9 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/internal/observ" "go.opentelemetry.io/otel/sdk/metric/metricdata" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // Default periodic reader timing. 
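The instrumented Collect above declares var err error up front and assigns to it instead of shadowing with :=, so the deferred cp.End(err) closure observes the final error: a deferred closure captures the variable itself, not its value at the moment defer executes. A minimal illustration of the pattern (using a named return for brevity; the SDK uses a plain local variable to the same effect):

```go
package main

import (
	"errors"
	"fmt"
)

func observe(end func(error)) (err error) {
	// The closure reads err when the function returns, so it sees the value
	// assigned below, not the nil it held when defer was evaluated.
	defer func() { end(err) }()

	err = errors.New("produce failed")
	return err
}

func main() {
	observe(func(err error) { fmt.Println("recorded outcome:", err) })
}
```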
@@ -114,7 +116,7 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri cancel: cancel, done: make(chan struct{}), rmPool: sync.Pool{ - New: func() interface{} { + New: func() any { return &metricdata.ResourceMetrics{} }, }, @@ -126,9 +128,26 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri r.run(ctx, conf.interval) }() + var err error + r.inst, err = observ.NewInstrumentation( + semconv.OTelComponentTypePeriodicMetricReader.Value.AsString(), + nextPeriodicReaderID(), + ) + if err != nil { + otel.Handle(err) + } + return r } +var periodicReaderIDCounter atomic.Int64 + +// nextPeriodicReaderID returns an identifier for this periodic reader, +// starting with 0 and incrementing by 1 each time it is called. +func nextPeriodicReaderID() int64 { + return periodicReaderIDCounter.Add(1) - 1 +} + // PeriodicReader is a Reader that continuously collects and exports metric // data at a set interval. type PeriodicReader struct { @@ -148,6 +167,8 @@ type PeriodicReader struct { shutdownOnce sync.Once rmPool sync.Pool + + inst *observ.Instrumentation } // Compile time check the periodicReader implements Reader and is comparable. @@ -234,9 +255,16 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet } // collect unwraps p as a produceHolder and returns its produce results. -func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { +func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error { + var err error + if r.inst != nil { + cp := r.inst.CollectMetrics(ctx) + defer func() { cp.End(err) }() + } + if p == nil { - return ErrReaderNotRegistered + err = ErrReaderNotRegistered + return err } ph, ok := p.(produceHolder) @@ -245,11 +273,11 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. - err := fmt.Errorf("periodic reader: invalid producer: %T", p) + err = fmt.Errorf("periodic reader: invalid producer: %T", p) return err } - err := ph.produce(ctx, rm) + err = ph.produce(ctx, rm) if err != nil { return err } @@ -349,7 +377,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the PeriodicReader. 
-func (r *PeriodicReader) MarshalLog() interface{} { +func (r *PeriodicReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 7bdb699c..408fddc8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -17,7 +17,6 @@ import ( "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" - "go.opentelemetry.io/otel/sdk/metric/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) @@ -37,17 +36,24 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { +func newPipeline( + res *resource.Resource, + reader Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, - int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, - float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, - exemplarFilter: exemplarFilter, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, + cardinalityLimit: cardinalityLimit, // aggregations is lazy allocated when needed. } } @@ -65,12 +71,13 @@ type pipeline struct { views []View sync.Mutex - int64Measures map[observableID[int64]][]aggregate.Measure[int64] - float64Measures map[observableID[float64]][]aggregate.Measure[float64] - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List - exemplarFilter exemplar.Filter + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter + cardinalityLimit int } // addInt64Measure adds a new int64 measure to the pipeline for each observer. @@ -388,10 +395,9 @@ func (i *inserter[N]) cachedAggregator( b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation // limits for the builder (an all the created aggregates). - // CardinalityLimit.Lookup returns 0 by default if unset (or + // cardinalityLimit will be 0 by default if unset (or // unrecognized input). Use that value directly. - b.AggregationLimit, _ = x.CardinalityLimit.Lookup() - + b.AggregationLimit = i.pipeline.cardinalityLimit in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} @@ -426,7 +432,7 @@ func (i *inserter[N]) logConflict(id instID) { } const msg = "duplicate metric stream definitions" - args := []interface{}{ + args := []any{ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), @@ -460,7 +466,7 @@ func (i *inserter[N]) logConflict(id instID) { global.Warn(msg, args...) 
} -func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { +func (*inserter[N]) instID(kind InstrumentKind, stream Stream) instID { var zero N return instID{ Name: stream.Name, @@ -590,10 +596,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { +func newPipelines( + res *resource.Resource, + readers []Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views, exemplarFilter) + p := newPipeline(res, r, views, exemplarFilter, cardinalityLimit) r.register(p) pipes = append(pipes, p) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index 2fca89e5..b0a6ec58 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter, conf.cardinalityLimit), forceFlush: flush, shutdown: sdown, } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index c96e500a..7b205c73 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -117,7 +117,7 @@ type produceHolder struct { type shutdownProducer struct{} // produce returns an ErrReaderShutdown error. -func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { +func (shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { return ErrReaderShutdown } @@ -127,10 +127,40 @@ type TemporalitySelector func(InstrumentKind) metricdata.Temporality // DefaultTemporalitySelector is the default TemporalitySelector used if // WithTemporalitySelector is not provided. CumulativeTemporality will be used // for all instrument kinds if this TemporalitySelector is used. -func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { +func DefaultTemporalitySelector(k InstrumentKind) metricdata.Temporality { + return CumulativeTemporalitySelector(k) +} + +// CumulativeTemporalitySelector is the TemporalitySelector that uses +// a cumulative temporality for all instrument kinds. +func CumulativeTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } +// DeltaTemporalitySelector is the TemporalitySelector that uses +// a delta temporality for instrument kinds: counter, histogram, observable counter +// All other instruments use cumulative temporality. +func DeltaTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindObservableCounter: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + +// LowMemoryTemporalitySelector is the TemporalitySelector that uses +// delta temporality for counters and histograms. All other instruments use +// cumulative temporality. 
+func LowMemoryTemporalitySelector(k InstrumentKind) metricdata.Temporality { + switch k { + case InstrumentKindCounter, InstrumentKindHistogram: + return metricdata.DeltaTemporality + default: + return metricdata.CumulativeTemporality + } +} + // AggregationSelector selects the aggregation and the parameters to use for // that aggregation based on the InstrumentKind. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index 0e5adc1a..ae5b57b1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.37.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index cefe4ab9..3f20eb7a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. -func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index 0d861971..bbe142d2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. 
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 16a062ad..4a1b017e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 78190392..5fed33d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. -func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go index cc8b8938..4c1c30f2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build dragonfly || freebsd || netbsd || openbsd || solaris -// +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go index f84f1732..4a26096c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build linux -// +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go index df12c44c..63ad2fa4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 3677c83d..2b8ca20b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 
//go:build windows -// +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 01b4d27a..51da76e8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. -func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go index f537e5ca..a1763267 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" @@ -63,12 +62,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. 
func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +75,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index a6ff26a4..6c50ab68 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go index a77742b0..25f62953 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 6712ce80..138e5772 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. -func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. 
-func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. -func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index 09b91e1e..28e1e4f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -112,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. -func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns whether r and o represent the same resource. Two resources can +// Equal reports whether r and o represent the same resource. Two resources can // be equal even if they have different schema URLs. // // See the documentation on the [Resource] type for the pitfalls of using == diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 6966ed86..7d15cbb9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -12,14 +12,17 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. 
const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. + DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) @@ -67,6 +70,8 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + inst *observ.BSP + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -87,11 +92,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -112,6 +113,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + var err error + bsp.inst, err = observ.NewBSP( + nextProcessorID(), + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -122,8 +133,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + // OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -162,6 +181,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.inst != nil { + err = errors.Join(err, bsp.inst.Shutdown()) + } }) return err } @@ -171,7 +193,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -274,6 +296,9 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.inst != nil { + bsp.inst.Processed(ctx, int64(l)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. 
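Reviewer note: the batch span processor hunks above replace the nested default/queue-size comparison with the `min` builtin, add an atomic per-processor ID counter, and, when the SDK's experimental observability gate is enabled, register queue size/capacity callbacks plus a processed-span counter that also covers `queue_full` drops. A minimal sketch of driving the instrumented paths (`OnEnd` enqueue, `exportSpans`) through the stable public API; the option values are illustrative, and whether the new self-metrics are emitted depends on the internal gate shown later in this diff:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	ctx := context.Background()
	exporter := tracetest.NewInMemoryExporter()

	// NewBatchSpanProcessor assigns this processor the next ID from the new
	// atomic counter and, if observability is enabled, registers its metrics.
	bsp := sdktrace.NewBatchSpanProcessor(exporter,
		sdktrace.WithMaxQueueSize(1024),      // illustrative; env vars can also set this
		sdktrace.WithMaxExportBatchSize(256), // illustrative
	)

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(ctx) }()

	_, span := tp.Tracer("example").Start(ctx, "work")
	span.End() // enqueues the span; a full queue would hit the drop path above

	// ForceFlush drains the queue through exportSpans, the path that reports
	// processed-span counts when instrumentation is active.
	_ = tp.ForceFlush(ctx)
	_ = len(exporter.GetSpans())
}
```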
@@ -382,11 +407,14 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -396,12 +424,15 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.inst != nil { + bsp.inst.ProcessedQueueFull(ctx, 1) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. -func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go index 1f60524e..b502c7d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/internal/x] for information about +the experimental features. */ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index c8d3fb7e..3649322a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -32,7 +32,7 @@ type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) @@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. -func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go index e3309231..58f68df4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go @@ -3,7 +3,7 @@ // Package env provides types and functionality for environment variable support // in the OpenTelemetry SDK. 
-package env // import "go.opentelemetry.io/otel/sdk/internal/env" +package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env" import ( "os" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go new file mode 100644 index 00000000..bd7fe236 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +const ( + // ScopeName is the name of the instrumentation scope. + ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ" + + // SchemaURL is the schema URL of the instrumentation. + SchemaURL = semconv.SchemaURL +) + +// ErrQueueFull is the attribute value for the "queue_full" error type. +var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType( + otelconv.ErrorTypeAttr("queue_full"), +) + +// BSPComponentName returns the component name attribute for a +// BatchSpanProcessor with the given ID. +func BSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeBatchingSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// BSP is the instrumentation for an OTel SDK BatchSpanProcessor. +type BSP struct { + reg metric.Registration + + processed metric.Int64Counter + processedOpts []metric.AddOption + processedQueueFullOpts []metric.AddOption +} + +func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + if err != nil { + err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err) + } + qCapInst := qCap.Inst() + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP queue size metric: %w", e) + err = errors.Join(err, e) + } + qSizeInst := qSize.Inst() + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + cmpnt := BSPComponentName(id) + set := attribute.NewSet(cmpnt, cmpntT) + + obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)} + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSizeInst, qLen(), obsOpts...) + o.ObserveInt64(qCapInst, qMax, obsOpts...) 
+ return nil + }, + qSizeInst, + qCapInst, + ) + if e != nil { + e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e) + err = errors.Join(err, e) + } + + processed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + if e != nil { + e := fmt.Errorf("failed to create BSP processed spans metric: %w", e) + err = errors.Join(err, e) + } + processedOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull) + processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)} + + return &BSP{ + reg: reg, + processed: processed.Inst(), + processedOpts: processedOpts, + processedQueueFullOpts: processedQueueFullOpts, + }, err +} + +func (b *BSP) Shutdown() error { return b.reg.Unregister() } + +func (b *BSP) Processed(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedOpts...) +} + +func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) { + b.processed.Add(ctx, n, b.processedQueueFullOpts...) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go new file mode 100644 index 00000000..b542121e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package observ provides observability instrumentation for the OTel trace SDK +// package. +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go new file mode 100644 index 00000000..7d338706 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" +) + +var measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, +} + +// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor. +type SSP struct { + spansProcessedCounter metric.Int64Counter + addOpts []metric.AddOption + attrs []attribute.KeyValue +} + +// SSPComponentName returns the component name attribute for a +// SimpleSpanProcessor with the given ID. +func SSPComponentName(id int64) attribute.KeyValue { + t := otelconv.ComponentTypeSimpleSpanProcessor + name := fmt.Sprintf("%s/%d", t, id) + return semconv.OTelComponentName(name) +} + +// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the +// provided ID. +// +// If the experimental observability is disabled, nil is returned. 
+func NewSSP(id int64) (*SSP, error) { + if !x.Observability.Enabled() { + return nil, nil + } + + meter := otel.GetMeterProvider().Meter( + ScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), + ) + spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter) + if err != nil { + err = fmt.Errorf("failed to create SSP processed spans metric: %w", err) + } + + componentName := SSPComponentName(id) + componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor) + attrs := []attribute.KeyValue{componentName, componentType} + addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))} + + return &SSP{ + spansProcessedCounter: spansProcessedCounter.Inst(), + addOpts: addOpts, + attrs: attrs, + }, err +} + +// SpanProcessed records that a span has been processed by the SimpleSpanProcessor. +// If err is non-nil, it records the processing error as an attribute. +func (ssp *SSP) SpanProcessed(ctx context.Context, err error) { + ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...) +} + +func (ssp *SSP) addOption(err error) []metric.AddOption { + if err == nil { + return ssp.addOpts + } + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, ssp.attrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))} +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go new file mode 100644 index 00000000..a8a16458 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go @@ -0,0 +1,223 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/internal/x" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" + "go.opentelemetry.io/otel/trace" +) + +var meterOpts = []metric.MeterOption{ + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(SchemaURL), +} + +// Tracer is instrumentation for an OTel SDK Tracer. +type Tracer struct { + enabled bool + + live metric.Int64UpDownCounter + started metric.Int64Counter +} + +func NewTracer() (Tracer, error) { + if !x.Observability.Enabled() { + return Tracer{}, nil + } + meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...) 
+ + var err error + l, e := otelconv.NewSDKSpanLive(meter) + if e != nil { + e = fmt.Errorf("failed to create span live metric: %w", e) + err = errors.Join(err, e) + } + + s, e := otelconv.NewSDKSpanStarted(meter) + if e != nil { + e = fmt.Errorf("failed to create span started metric: %w", e) + err = errors.Join(err, e) + } + + return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err +} + +func (t Tracer) Enabled() bool { return t.enabled } + +func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) { + key := spanStartedKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + opts := spanStartedOpts[key] + t.started.Add(ctx, 1, opts...) +} + +func (t Tracer) SpanLive(ctx context.Context, span trace.Span) { + t.spanLive(ctx, 1, span) +} + +func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) { + t.spanLive(ctx, -1, span) +} + +func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) { + key := spanLiveKey{sampled: span.SpanContext().IsSampled()} + opts := spanLiveOpts[key] + t.live.Add(ctx, value, opts...) +} + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedKey struct { + parent parentState + sampling samplingState +} + +var spanStartedOpts = map[spanStartedKey][]metric.AddOption{ + { + parentStateNoParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateLocalParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + { + parentStateRemoteParent, + samplingStateDrop, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + )), + }, + + { + parentStateNoParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateLocalParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordOnly, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + )), + 
}, + + { + parentStateNoParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateLocalParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, + { + parentStateRemoteParent, + samplingStateRecordAndSample, + }: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + )), + }, +} + +type spanLiveKey struct { + sampled bool +} + +var spanLiveOpts = map[spanLiveKey][]metric.AddOption{ + {true}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + )), + }, + {false}: { + metric.WithAttributeSet(attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + )), + }, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 0e2a2e7c..d2cf4ebd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -13,14 +13,13 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) -const ( - defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" -) +const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" // tracerProviderConfig. type tracerProviderConfig struct { @@ -45,7 +44,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. 
-func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -159,6 +158,13 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T provider: p, instrumentationScope: is, } + + var err error + t.inst, err = observ.NewTracer() + if err != nil { + otel.Handle(err) + } + p.namedTracer[is] = t } return t, ok diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index aa7b262d..689663d4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 664e13e0..771e427a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" + "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" + "go.opentelemetry.io/otel/trace" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all @@ -17,6 +20,8 @@ type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once + + inst *observ.SSP } var _ SpanProcessor = (*simpleSpanProcessor)(nil) @@ -33,24 +38,48 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } + + var err error + ssp.inst, err = observ.NewSSP(nextSimpleProcessorID()) + if err != nil { + otel.Handle(err) + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } +var simpleProcessorIDCounter atomic.Int64 + +// nextSimpleProcessorID returns an identifier for this simple span processor, +// starting with 0 and incrementing by 1 each time it is called. +func nextSimpleProcessorID() int64 { + return simpleProcessorIDCounter.Add(1) - 1 +} + // OnStart does nothing. -func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. 
func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() + var err error if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { - if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}) + if err != nil { otel.Handle(err) } } + + if ssp.inst != nil { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext()) + ssp.inst.SpanProcessed(ctx, err) + } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. @@ -104,13 +133,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. -func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index d511d0f2..63aa3378 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. func (s snapshot) Name() string { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 1785a4bb..8cfd9f62 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. @@ -150,6 +151,12 @@ type recordingSpan struct { // tracer is the SDK tracer that created this span. tracer *tracer + + // origCtx is the context used when starting this span that has the + // recordingSpan instance set as the active span. If not nil, it is used + // when ending the span to ensure any metrics are recorded with a context + // containing this span without requiring an additional allocation. + origCtx context.Context } var ( @@ -157,6 +164,10 @@ var ( _ runtimeTracer = (*recordingSpan)(nil) ) +func (s *recordingSpan) setOrigCtx(ctx context.Context) { + s.origCtx = ctx +} + // SpanContext returns the SpanContext of this span. 
func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { @@ -165,7 +176,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +188,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. @@ -495,6 +506,17 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.inst.Enabled() { + ctx := s.origCtx + if ctx == nil { + // This should not happen as the origCtx should be set, but + // ensure trace information is propagated in the case of an + // error. + ctx = trace.ContextWithSpan(context.Background(), s) + } + defer s.tracer.inst.SpanEnded(ctx, s) + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +567,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go index bec5e209..321d9743 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -3,7 +3,7 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" -import "go.opentelemetry.io/otel/sdk/internal/env" +import "go.opentelemetry.io/otel/sdk/trace/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 0b65ae9a..e1d08fd4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -8,6 +8,7 @@ import ( "time" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/trace/internal/observ" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +18,8 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + inst observ.Tracer } var _ trace.Tracer = &tracer{} @@ -46,17 +49,32 @@ func (tr *tracer) Start( } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.inst.Enabled() { + if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok { + // If this is a recording span, store the original context. + // This allows later retrieval of baggage and other information + // that may have been stored in the context at span start time and + // to avoid the allocation of repeatedly calling + // trace.ContextWithSpan. + o.setOrigCtx(newCtx) + } + psc := trace.SpanContextFromContext(ctx) + tr.inst.SpanStarted(newCtx, psc, s) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. 
sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -112,11 +130,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. func (tr *tracer) newRecordingSpan( + ctx context.Context, psc, sc trace.SpanContext, name string, sr SamplingResult, @@ -153,6 +172,13 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) + if tr.inst.Enabled() { + // Propagate any existing values from the context with the new span to + // the measurement context. + ctx = trace.ContextWithSpan(ctx, s) + tr.inst.SpanLive(ctx, s) + } + return s } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go index 07117495..e12fa67e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go @@ -25,10 +25,10 @@ func NewNoopExporter() *NoopExporter { type NoopExporter struct{} // ExportSpans handles export of spans by dropping them. -func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } +func (*NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } // Shutdown stops the exporter by doing nothing. -func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } +func (*NoopExporter) Shutdown(context.Context) error { return nil } var _ trace.SpanExporter = (*InMemoryExporter)(nil) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index 732669a1..ca63038f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -47,14 +47,14 @@ func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { // Shutdown does nothing. // // This method is safe to be called concurrently. -func (sr *SpanRecorder) Shutdown(context.Context) error { +func (*SpanRecorder) Shutdown(context.Context) error { return nil } // ForceFlush does nothing. // // This method is safe to be called concurrently. 
-func (sr *SpanRecorder) ForceFlush(context.Context) error { +func (*SpanRecorder) ForceFlush(context.Context) error { return nil } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index cd2cc30c..12b384b0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -37,7 +37,7 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { } ro := make([]tracesdk.ReadOnlySpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { ro[i] = s[i].Snapshot() } return ro diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go deleted file mode 100644 index b84dd2c5..00000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current release version of the metric SDK in use. -func version() string { - return "1.16.0-rc.1" -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index c0217af6..0a3b3661 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.37.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go index 58b5edde..f18d6e3f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -121,7 +121,7 @@ func hostIPNamePort(hostWithPort string) (ip, name string, port int) { if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { port = int(parsedPort) // nolint: gosec // Bit size of 16 checked above. } - return + return ip, name, port } // EndUserAttributesFromHTTPRequest generates attributes of the diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md deleted file mode 100644 index 8a11ea28..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md +++ /dev/null @@ -1,155 +0,0 @@ -# Semantic Convention Changes - -The `go.opentelemetry.io/otel/semconv/v1.30.0` should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.28.0` with the following exceptions. - -Note: `go.opentelemetry.io/otel/semconv/v1.29.0` does not exist due to bugs from the upstream [OpenTelemetry Semantic Conventions]. - -## Dropped deprecations - -The following declarations have been deprecated in the [OpenTelemetry Semantic Conventions]. -Refer to the respective documentation in that repository for deprecation instructions for each type. 
- -- `CodeColumn` -- `CodeColumnKey` -- `CodeFunction` -- `CodeFunctionKey` -- `DBCassandraConsistencyLevelAll` -- `DBCassandraConsistencyLevelAny` -- `DBCassandraConsistencyLevelEachQuorum` -- `DBCassandraConsistencyLevelKey` -- `DBCassandraConsistencyLevelLocalOne` -- `DBCassandraConsistencyLevelLocalQuorum` -- `DBCassandraConsistencyLevelLocalSerial` -- `DBCassandraConsistencyLevelOne` -- `DBCassandraConsistencyLevelQuorum` -- `DBCassandraConsistencyLevelSerial` -- `DBCassandraConsistencyLevelThree` -- `DBCassandraConsistencyLevelTwo` -- `DBCassandraCoordinatorDC` -- `DBCassandraCoordinatorDCKey` -- `DBCassandraCoordinatorID` -- `DBCassandraCoordinatorIDKey` -- `DBCassandraIdempotence` -- `DBCassandraIdempotenceKey` -- `DBCassandraPageSize` -- `DBCassandraPageSizeKey` -- `DBCassandraSpeculativeExecutionCount` -- `DBCassandraSpeculativeExecutionCountKey` -- `DBCosmosDBClientID` -- `DBCosmosDBClientIDKey` -- `DBCosmosDBConnectionModeDirect` -- `DBCosmosDBConnectionModeGateway` -- `DBCosmosDBConnectionModeKey` -- `DBCosmosDBOperationTypeBatch` -- `DBCosmosDBOperationTypeCreate` -- `DBCosmosDBOperationTypeDelete` -- `DBCosmosDBOperationTypeExecute` -- `DBCosmosDBOperationTypeExecuteJavascript` -- `DBCosmosDBOperationTypeHead` -- `DBCosmosDBOperationTypeHeadFeed` -- `DBCosmosDBOperationTypeInvalid` -- `DBCosmosDBOperationTypeKey` -- `DBCosmosDBOperationTypePatch` -- `DBCosmosDBOperationTypeQuery` -- `DBCosmosDBOperationTypeQueryPlan` -- `DBCosmosDBOperationTypeRead` -- `DBCosmosDBOperationTypeReadFeed` -- `DBCosmosDBOperationTypeReplace` -- `DBCosmosDBOperationTypeUpsert` -- `DBCosmosDBRequestCharge` -- `DBCosmosDBRequestChargeKey` -- `DBCosmosDBRequestContentLength` -- `DBCosmosDBRequestContentLengthKey` -- `DBCosmosDBSubStatusCode` -- `DBCosmosDBSubStatusCodeKey` -- `DBElasticsearchNodeName` -- `DBElasticsearchNodeNameKey` -- `DBSystemAdabas` -- `DBSystemCache` -- `DBSystemCassandra` -- `DBSystemClickhouse` -- `DBSystemCloudscape` -- `DBSystemCockroachdb` -- `DBSystemColdfusion` -- `DBSystemCosmosDB` -- `DBSystemCouchDB` -- `DBSystemCouchbase` -- `DBSystemDb2` -- `DBSystemDerby` -- `DBSystemDynamoDB` -- `DBSystemEDB` -- `DBSystemElasticsearch` -- `DBSystemFilemaker` -- `DBSystemFirebird` -- `DBSystemFirstSQL` -- `DBSystemGeode` -- `DBSystemH2` -- `DBSystemHBase` -- `DBSystemHSQLDB` -- `DBSystemHanaDB` -- `DBSystemHive` -- `DBSystemInfluxdb` -- `DBSystemInformix` -- `DBSystemIngres` -- `DBSystemInstantDB` -- `DBSystemInterbase` -- `DBSystemIntersystemsCache` -- `DBSystemKey` -- `DBSystemMSSQL` -- `DBSystemMariaDB` -- `DBSystemMaxDB` -- `DBSystemMemcached` -- `DBSystemMongoDB` -- `DBSystemMssqlcompact` -- `DBSystemMySQL` -- `DBSystemNeo4j` -- `DBSystemNetezza` -- `DBSystemOpensearch` -- `DBSystemOracle` -- `DBSystemOtherSQL` -- `DBSystemPervasive` -- `DBSystemPointbase` -- `DBSystemPostgreSQL` -- `DBSystemProgress` -- `DBSystemRedis` -- `DBSystemRedshift` -- `DBSystemSpanner` -- `DBSystemSqlite` -- `DBSystemSybase` -- `DBSystemTeradata` -- `DBSystemTrino` -- `DBSystemVertica` -- `EventName` -- `EventNameKey` -- `ExceptionEscaped` -- `ExceptionEscapedKey` -- `GenAIOpenaiRequestSeed` -- `GenAIOpenaiRequestSeedKey` -- `ProcessExecutableBuildIDProfiling` -- `ProcessExecutableBuildIDProfilingKey` -- `SystemNetworkStateClose` -- `SystemNetworkStateCloseWait` -- `SystemNetworkStateClosing` -- `SystemNetworkStateDelete` -- `SystemNetworkStateEstablished` -- `SystemNetworkStateFinWait1` -- `SystemNetworkStateFinWait2` -- `SystemNetworkStateKey` -- `SystemNetworkStateLastAck` -- 
`SystemNetworkStateListen` -- `SystemNetworkStateSynRecv` -- `SystemNetworkStateSynSent` -- `SystemNetworkStateTimeWait` -- `VCSRepositoryChangeID` -- `VCSRepositoryChangeIDKey` -- `VCSRepositoryChangeTitle` -- `VCSRepositoryChangeTitleKey` -- `VCSRepositoryRefName` -- `VCSRepositoryRefNameKey` -- `VCSRepositoryRefRevision` -- `VCSRepositoryRefRevisionKey` -- `VCSRepositoryRefTypeBranch` -- `VCSRepositoryRefTypeKey` -- `VCSRepositoryRefTypeTag` - -[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md deleted file mode 100644 index 072ea692..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.30.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.30.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.30.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go deleted file mode 100644 index 60f3df0d..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go +++ /dev/null @@ -1,12333 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0" - -import "go.opentelemetry.io/otel/attribute" - -// Namespace: android -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version (`os.version`) of - // the android operating system. More information can be found [here]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "33", "32" - // - // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found [here]. -// -// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// Namespace: artifact -const ( - // ArtifactAttestationFilenameKey is the attribute Key conforming to the - // "artifact.attestation.filename" semantic conventions. It represents the - // provenance filename of the built attestation which directly relates to the - // build artifact filename. This filename SHOULD accompany the artifact at - // publish time. See the [SLSA Relationship] specification for more information. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0.attestation", - // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", - // "file-name-package.tar.gz.intoto.json1" - // - // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations - ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") - - // ArtifactAttestationHashKey is the attribute Key conforming to the - // "artifact.attestation.hash" semantic conventions. It represents the full - // [hash value (see glossary)], of the built attestation. Some envelopes in the - // [software attestation space] also refer to this as the **digest**. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec - ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") - - // ArtifactAttestationIDKey is the attribute Key conforming to the - // "artifact.attestation.id" semantic conventions. It represents the id of the - // build [software attestation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - // - // [software attestation]: https://slsa.dev/attestation-model - ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") - - // ArtifactFilenameKey is the attribute Key conforming to the - // "artifact.filename" semantic conventions. It represents the human readable - // file name of the artifact, typically generated during build and release - // processes. Often includes the package name and version in the file name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", - // "release-1.tar.gz", "file-name-package.tar.gz" - // Note: This file name can also act as the [Package Name] - // in cases where the package ecosystem maps accordingly. - // Additionally, the artifact [can be published] - // for others, but that is not a guarantee. - // - // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model - // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain - ArtifactFilenameKey = attribute.Key("artifact.filename") - - // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" - // semantic conventions. It represents the full [hash value (see glossary)], - // often found in checksum.txt on a release of the artifact and used to verify - // package integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" - // Note: The specific algorithm used to create the cryptographic hash value is - // not defined. In situations where an artifact has multiple - // cryptographic hashes, it is up to the implementer to choose which - // hash value to set here; this should be the most secure hash algorithm - // that is suitable for the situation and consistent with the - // corresponding attestation. 
The implementer can then provide the other - // hash values through an additional set of attribute extensions as they - // deem necessary. - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - ArtifactHashKey = attribute.Key("artifact.hash") - - // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" - // semantic conventions. It represents the [Package URL] of the - // [package artifact] provides a standard way to identify and locate the - // packaged artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pkg:github/package-url/purl-spec@1209109710924", - // "pkg:npm/foo@12.12.3" - // - // [Package URL]: https://github.com/package-url/purl-spec - // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model - ArtifactPurlKey = attribute.Key("artifact.purl") - - // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" - // semantic conventions. It represents the version of the artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v0.1.0", "1.2.1", "122691-build" - ArtifactVersionKey = attribute.Key("artifact.version") -) - -// ArtifactAttestationFilename returns an attribute KeyValue conforming to the -// "artifact.attestation.filename" semantic conventions. It represents the -// provenance filename of the built attestation which directly relates to the -// build artifact filename. This filename SHOULD accompany the artifact at -// publish time. See the [SLSA Relationship] specification for more information. -// -// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations -func ArtifactAttestationFilename(val string) attribute.KeyValue { - return ArtifactAttestationFilenameKey.String(val) -} - -// ArtifactAttestationHash returns an attribute KeyValue conforming to the -// "artifact.attestation.hash" semantic conventions. It represents the full -// [hash value (see glossary)], of the built attestation. Some envelopes in the -// [software attestation space] also refer to this as the **digest**. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec -func ArtifactAttestationHash(val string) attribute.KeyValue { - return ArtifactAttestationHashKey.String(val) -} - -// ArtifactAttestationID returns an attribute KeyValue conforming to the -// "artifact.attestation.id" semantic conventions. It represents the id of the -// build [software attestation]. -// -// [software attestation]: https://slsa.dev/attestation-model -func ArtifactAttestationID(val string) attribute.KeyValue { - return ArtifactAttestationIDKey.String(val) -} - -// ArtifactFilename returns an attribute KeyValue conforming to the -// "artifact.filename" semantic conventions. It represents the human readable -// file name of the artifact, typically generated during build and release -// processes. Often includes the package name and version in the file name. -func ArtifactFilename(val string) attribute.KeyValue { - return ArtifactFilenameKey.String(val) -} - -// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" -// semantic conventions. It represents the full [hash value (see glossary)], -// often found in checksum.txt on a release of the artifact and used to verify -// package integrity. 
-// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -func ArtifactHash(val string) attribute.KeyValue { - return ArtifactHashKey.String(val) -} - -// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" -// semantic conventions. It represents the [Package URL] of the -// [package artifact] provides a standard way to identify and locate the packaged -// artifact. -// -// [Package URL]: https://github.com/package-url/purl-spec -// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model -func ArtifactPurl(val string) attribute.KeyValue { - return ArtifactPurlKey.String(val) -} - -// ArtifactVersion returns an attribute KeyValue conforming to the -// "artifact.version" semantic conventions. It represents the version of the -// artifact. -func ArtifactVersion(val string) attribute.KeyValue { - return ArtifactVersionKey.String(val) -} - -// Namespace: aws -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the - // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the - // JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lives", "id" - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the value - // of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }" - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of the - // `Count` response parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the - // value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "CatsTable" - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }" - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` - // request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value of - // the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "name_to_group" - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the - // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents - // the JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of the - // `Limit` request parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }" - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value of - // the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, - // ProductReviews" - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.WriteCapacityUnits` request - // parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of - // the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of - // the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of the - // `Segment` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. 
It represents the value of the - // `Select` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ALL_ATTRIBUTES", "COUNT" - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the number of - // items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys in - // the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "Cats" - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the value - // of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS cluster]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - // - // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container instance]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" - // - // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" - // semantic conventions. It represents the ARN of a running [ECS task]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", - // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - // - // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family name of - // the [ECS task definition] used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-family" - // - // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID MUST - // be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", - // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision for - // the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "8", "26" - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") - - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS - // cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") - - // AWSExtendedRequestIDKey is the attribute Key conforming to the - // "aws.extended_request_id" semantic conventions. It represents the AWS - // extended request ID as returned in the response header `x-amz-id-2`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" - AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") - - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked - // ARN as provided on the `Context` passed to the function ( - // `Lambda-Runtime-Invoked-Function-Arn` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" - // Note: This may be different from `cloud.resource_id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource - // Name(s) (ARN) of the AWS log group(s). 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" - // Note: See the [log group ARN format documentation]. - // - // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of the - // AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/aws/lambda/my-function", "opentelemetry-service" - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each - // write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the - // AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - // Note: See the [log stream ARN format documentation]. One log group can - // contain several log streams, so these ARNs necessarily identify both a log - // group and a log stream. - // - // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) of the - // AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in the - // response headers `x-amzn-requestid`, `x-amzn-request-id` or - // `x-amz-request-id`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" - AWSRequestIDKey = attribute.Key("aws.request_id") - - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request refers to. - // Corresponds to the `--bucket` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-bucket-name" - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. 
- // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source object - // (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 API]. - // This applies in particular to the following operations: - // - // - [copy-object] - // - [upload-part-copy] - // - // - // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" - // Note: The `delete` attribute is only applicable to the [delete-object] - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 API]. - // - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `key` attribute is applicable to all object-related S3 operations, - // i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - [copy-object] - // - [delete-object] - // - [get-object] - // - [head-object] - // - [put-object] - // - [restore-object] - // - [select-object-content] - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [create-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html - // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html - // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html - // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html - // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number of - // the part being uploaded in a multipart-upload operation. This is a positive - // integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the [upload-part] - // and [upload-part-copy] operations. - // The `part_number` attribute corresponds to the `--part-number` parameter of - // the - // [upload-part operation within the S3 API]. - // - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" - // semantic conventions. It represents the upload ID that identifies the - // multipart upload. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" - // Note: The `upload_id` attribute applies to S3 multipart-upload operations and - // corresponds to the `--upload-id` parameter - // of the [S3 API] multipart operations. 
- // This applies in particular to the following operations: - // - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to -// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents -// the JSON-serialized value of each item in the `AttributeDefinitions` request -// field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the -// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value -// of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the -// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the -// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the -// value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It -// represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field. -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of the -// `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to -// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents -// the JSON-serialized value of the `ItemCollectionMetrics` response field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents -// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request -// field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of the -// `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It -// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming -// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It -// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of -// the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. 
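For context on how the generated helpers being removed in this file are consumed: each returns an `attribute.KeyValue` that is typically attached to a span. A hedged sketch using the v1.30.0 import path of the very file deleted here (a newer `semconv` package would be used the same way); the tracer setup and values are illustrative only, not from this repository:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

func main() {
	queryUsers(context.Background())
}

func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
	defer span.End()

	// Each helper returns an attribute.KeyValue with the conventional key
	// and value type, so call sites never hand-write attribute names.
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBScanForward(true),
	)
}
```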
-func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the -// `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value of -// the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an -// [ECS cluster]. -// -// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container instance]. -// -// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS task]. -// -// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task definition] used to create the ECS task. -// -// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" -// semantic conventions. It represents the ID of a running ECS task. The ID MUST -// be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. 
-func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// AWSExtendedRequestID returns an attribute KeyValue conforming to the -// "aws.extended_request_id" semantic conventions. It represents the AWS extended -// request ID as returned in the response header `x-amz-id-2`. -func AWSExtendedRequestID(val string) attribute.KeyValue { - return AWSExtendedRequestIDKey.String(val) -} - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked -// ARN as provided on the `Context` passed to the function ( -// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` -// applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of the -// AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" -// semantic conventions. It represents the AWS request ID as returned in the -// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` -// . -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" -// semantic conventions. It represents the S3 bucket name the request refers to. -// Corresponds to the `--bucket` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object (in -// the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" -// semantic conventions. 
It represents the delete request container that -// specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic -// conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Enum values for aws.ecs.launchtype -var ( - // ec2 - // Stability: development - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - // Stability: development - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Namespace: az -const ( - // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic - // conventions. It represents the [Azure Resource Provider Namespace] as - // recognized by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" - // - // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers - AzNamespaceKey = attribute.Key("az.namespace") - - // AzServiceRequestIDKey is the attribute Key conforming to the - // "az.service_request_id" semantic conventions. It represents the unique - // identifier of the service request. It's generated by the Azure service and - // returned with the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00000000-0000-0000-0000-000000000000" - AzServiceRequestIDKey = attribute.Key("az.service_request_id") -) - -// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" -// semantic conventions. It represents the [Azure Resource Provider Namespace] as -// recognized by the client. -// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzNamespace(val string) attribute.KeyValue { - return AzNamespaceKey.String(val) -} - -// AzServiceRequestID returns an attribute KeyValue conforming to the -// "az.service_request_id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzServiceRequestID(val string) attribute.KeyValue { - return AzServiceRequestIDKey.String(val) -} - -// Namespace: azure -const ( - // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" - // semantic conventions. It represents the unique identifier of the client - // instance. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" - AzureClientIDKey = attribute.Key("azure.client.id") - - // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the - // "azure.cosmosdb.connection.mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") - - // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the - // "azure.cosmosdb.consistency.level" semantic conventions. It represents the - // account or request [consistency level]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", - // "Session" - // - // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels - AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") - - // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to - // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It - // represents the list of regions contacted during operation in the order that - // they were contacted. If there is more than one region listed, it indicates - // that the operation was performed on multiple regions i.e. cross-regional - // call. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "North Central US", "Australia East", "Australia Southeast" - // Note: Region name matches the format of `displayName` in [Azure Location API] - // - // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location - AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") - - // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the - // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents - // the number of request units consumed by the operation. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 46.18, 1.0 - AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") - - // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the - // "azure.cosmosdb.request.body.size" semantic conventions. It represents the - // request payload size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") - - // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the - // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents - // the cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000, 1002 - AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") -) - -// AzureClientID returns an attribute KeyValue conforming to the -// "azure.client.id" semantic conventions. It represents the unique identifier of -// the client instance. 
-func AzureClientID(val string) attribute.KeyValue { - return AzureClientIDKey.String(val) -} - -// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue -// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic -// conventions. It represents the list of regions contacted during operation in -// the order that they were contacted. If there is more than one region listed, -// it indicates that the operation was performed on multiple regions i.e. -// cross-regional call. -func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { - return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) -} - -// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming -// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It -// represents the number of request units consumed by the operation. -func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { - return AzureCosmosDBOperationRequestChargeKey.Float64(val) -} - -// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the -// "azure.cosmosdb.request.body.size" semantic conventions. It represents the -// request payload size in bytes. -func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { - return AzureCosmosDBRequestBodySizeKey.Int(val) -} - -// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to -// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It -// represents the cosmos DB sub status code. -func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { - return AzureCosmosDBResponseSubStatusCodeKey.Int(val) -} - -// Enum values for azure.cosmosdb.connection.mode -var ( - // Gateway (HTTP) connection. - // Stability: development - AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") - // Direct connection. - // Stability: development - AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") -) - -// Enum values for azure.cosmosdb.consistency.level -var ( - // strong - // Stability: development - AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // bounded_staleness - // Stability: development - AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // session - // Stability: development - AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // eventual - // Stability: development - AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // consistent_prefix - // Stability: development - AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") -) - -// Namespace: browser -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.brands`). - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the "browser.language" - // semantic conventions. 
It represents the preferred language of the user using - // the browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "en", "en-US", "fr", "fr-FR" - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the browser is - // running on a mobile device. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be - // left unset. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" - // semantic conventions. It represents the platform on which the browser is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Windows", "macOS", "Android" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the - // [W3C User-Agent Client Hints specification]. Note that some (but not all) of - // these values can overlap with values in the - // [`os.type` and `os.name` attributes]. However, for consistency, the values in - // the `browser.platform` attribute should capture the exact value that the user - // agent provides. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform - // [`os.type` and `os.name` attributes]: ./os.md - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" -// semantic conventions. It represents the array of brand name and version -// separated by a space. -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred language -// of the user using the browser. -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" -// semantic conventions. It represents a boolean that is true if the browser is -// running on a mobile device. -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running. 
-func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Namespace: cassandra -const ( - // CassandraConsistencyLevelKey is the attribute Key conforming to the - // "cassandra.consistency.level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from [CQL]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html - CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") - - // CassandraCoordinatorDCKey is the attribute Key conforming to the - // "cassandra.coordinator.dc" semantic conventions. It represents the data - // center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: us-west-2 - CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") - - // CassandraCoordinatorIDKey is the attribute Key conforming to the - // "cassandra.coordinator.id" semantic conventions. It represents the ID of the - // coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af - CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") - - // CassandraPageSizeKey is the attribute Key conforming to the - // "cassandra.page.size" semantic conventions. It represents the fetch size used - // for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 5000 - CassandraPageSizeKey = attribute.Key("cassandra.page.size") - - // CassandraQueryIdempotentKey is the attribute Key conforming to the - // "cassandra.query.idempotent" semantic conventions. It represents the whether - // or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") - - // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the - // "cassandra.speculative_execution.count" semantic conventions. It represents - // the number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 2 - CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") -) - -// CassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "cassandra.coordinator.dc" semantic conventions. It represents the data center -// of the coordinating node for a query. -func CassandraCoordinatorDC(val string) attribute.KeyValue { - return CassandraCoordinatorDCKey.String(val) -} - -// CassandraCoordinatorID returns an attribute KeyValue conforming to the -// "cassandra.coordinator.id" semantic conventions. It represents the ID of the -// coordinating node for a query. -func CassandraCoordinatorID(val string) attribute.KeyValue { - return CassandraCoordinatorIDKey.String(val) -} - -// CassandraPageSize returns an attribute KeyValue conforming to the -// "cassandra.page.size" semantic conventions. It represents the fetch size used -// for paging, i.e. how many rows will be returned at once. 
-func CassandraPageSize(val int) attribute.KeyValue { - return CassandraPageSizeKey.Int(val) -} - -// CassandraQueryIdempotent returns an attribute KeyValue conforming to the -// "cassandra.query.idempotent" semantic conventions. It represents the whether -// or not the query is idempotent. -func CassandraQueryIdempotent(val bool) attribute.KeyValue { - return CassandraQueryIdempotentKey.Bool(val) -} - -// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to -// the "cassandra.speculative_execution.count" semantic conventions. It -// represents the number of times a query was speculatively executed. Not set or -// `0` if the query was not executed speculatively. -func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return CassandraSpeculativeExecutionCountKey.Int(val) -} - -// Enum values for cassandra.consistency.level -var ( - // all - // Stability: development - CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // each_quorum - // Stability: development - CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // quorum - // Stability: development - CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // local_quorum - // Stability: development - CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // one - // Stability: development - CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // two - // Stability: development - CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // three - // Stability: development - CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // local_one - // Stability: development - CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // any - // Stability: development - CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // serial - // Stability: development - CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // local_serial - // Stability: development - CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") -) - -// Namespace: cicd -const ( - // CICDPipelineNameKey is the attribute Key conforming to the - // "cicd.pipeline.name" semantic conventions. It represents the human readable - // name of the pipeline within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Build and Test", "Lint", "Deploy Go Project", - // "deploy_to_environment" - CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") - - // CICDPipelineResultKey is the attribute Key conforming to the - // "cicd.pipeline.result" semantic conventions. It represents the result of a - // pipeline run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") - - // CICDPipelineRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.run.id" semantic conventions. It represents the unique - // identifier of a pipeline run within a CI/CD system. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "120912" - CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") - - // CICDPipelineRunStateKey is the attribute Key conforming to the - // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline - // run goes through these states during its lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pending", "executing", "finalizing" - CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") - - // CICDPipelineTaskNameKey is the attribute Key conforming to the - // "cicd.pipeline.task.name" semantic conventions. It represents the human - // readable name of a task within a pipeline. Task here most closely aligns with - // a [computing process] in a pipeline. Other terms for tasks include commands, - // steps, and procedures. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" - // - // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) - CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") - - // CICDPipelineTaskRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique - // identifier of a task run within a pipeline. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "12097" - CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") - - // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the - // [URL] of the pipeline run providing the complete address in order to locate - // and identify the pipeline run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") - - // CICDPipelineTaskTypeKey is the attribute Key conforming to the - // "cicd.pipeline.task.type" semantic conventions. It represents the type of the - // task within a pipeline. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "build", "test", "deploy" - CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") - - // CICDSystemComponentKey is the attribute Key conforming to the - // "cicd.system.component" semantic conventions. It represents the name of a - // component of the CICD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "controller", "scheduler", "agent" - CICDSystemComponentKey = attribute.Key("cicd.system.component") - - // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" - // semantic conventions. It represents the state of a CICD worker / agent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle", "busy", "down" - CICDWorkerStateKey = attribute.Key("cicd.worker.state") -) - -// CICDPipelineName returns an attribute KeyValue conforming to the -// "cicd.pipeline.name" semantic conventions. 
It represents the human readable -// name of the pipeline within a CI/CD system. -func CICDPipelineName(val string) attribute.KeyValue { - return CICDPipelineNameKey.String(val) -} - -// CICDPipelineRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.id" semantic conventions. It represents the unique -// identifier of a pipeline run within a CI/CD system. -func CICDPipelineRunID(val string) attribute.KeyValue { - return CICDPipelineRunIDKey.String(val) -} - -// CICDPipelineTaskName returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.name" semantic conventions. It represents the human -// readable name of a task within a pipeline. Task here most closely aligns with -// a [computing process] in a pipeline. Other terms for tasks include commands, -// steps, and procedures. -// -// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) -func CICDPipelineTaskName(val string) attribute.KeyValue { - return CICDPipelineTaskNameKey.String(val) -} - -// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique -// identifier of a task run within a pipeline. -func CICDPipelineTaskRunID(val string) attribute.KeyValue { - return CICDPipelineTaskRunIDKey.String(val) -} - -// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the -// [URL] of the pipeline run providing the complete address in order to locate -// and identify the pipeline run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { - return CICDPipelineTaskRunURLFullKey.String(val) -} - -// CICDSystemComponent returns an attribute KeyValue conforming to the -// "cicd.system.component" semantic conventions. It represents the name of a -// component of the CICD system. -func CICDSystemComponent(val string) attribute.KeyValue { - return CICDSystemComponentKey.String(val) -} - -// Enum values for cicd.pipeline.result -var ( - // The pipeline run finished successfully. - // Stability: development - CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") - // The pipeline run did not finish successfully, eg. due to a compile error or a - // failing test. Such failures are usually detected by non-zero exit codes of - // the tools executed in the pipeline run. - // Stability: development - CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") - // The pipeline run failed due to an error in the CICD system, eg. due to the - // worker being killed. - // Stability: development - CICDPipelineResultError = CICDPipelineResultKey.String("error") - // A timeout caused the pipeline run to be interrupted. - // Stability: development - CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") - // The pipeline run was cancelled, eg. by a user manually cancelling the - // pipeline run. - // Stability: development - CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") - // The pipeline run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") -) - -// Enum values for cicd.pipeline.run.state -var ( - // The run pending state spans from the event triggering the pipeline run until - // the execution of the run starts (eg. time spent in a queue, provisioning - // agents, creating run resources). 
- // - // Stability: development - CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") - // The executing state spans the execution of any run tasks (eg. build, test). - // Stability: development - CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") - // The finalizing state spans from when the run has finished executing (eg. - // cleanup of run resources). - // Stability: development - CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") -) - -// Enum values for cicd.pipeline.task.type -var ( - // build - // Stability: development - CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") - // test - // Stability: development - CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") - // deploy - // Stability: development - CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") -) - -// Enum values for cicd.worker.state -var ( - // The worker is not performing work for the CICD system. It is available to the - // CICD system to perform work on (online / idle). - // Stability: development - CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") - // The worker is performing work for the CICD system. - // Stability: development - CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") - // The worker is not available to the CICD system (disconnected / down). - // Stability: development - CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") -) - -// Namespace: client -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.address` SHOULD represent the client address behind any - // intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" semantic - // conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.port` SHOULD represent the client port behind any - // intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the "client.address" -// semantic conventions. It represents the client address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// Namespace: cloud -const ( - // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" - // semantic conventions. It represents the cloud account ID the resource is - // assigned to. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "111111111111", "opentelemetry" - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to increase - // availability. Availability zone represents the zone where the resource is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-east-1c" - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic - // conventions. It represents the geographical region the resource is running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1", "us-east-1" - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions], [AWS regions], [Azure regions], - // [Google Cloud regions], or [Tencent Cloud regions]. - // - // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm - // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ - // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ - // [Google Cloud regions]: https://cloud.google.com/about/locations - // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" - // semantic conventions. It represents the cloud provider-specific native - // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a - // [fully qualified resource ID] on Azure, a [full resource name] on GCP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", - // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", - // "/subscriptions//resourceGroups/ - // /providers/Microsoft.Web/sites//functions/" - // Note: On some cloud providers, it may not be possible to determine the full - // ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud provider. - // The following well-known definitions MUST be used if you set this attribute - // and they apply: - // - // - **AWS Lambda:** The function [ARN]. 
- // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix] - // with the resolved function version, as the same runtime instance may be - // invocable with - // multiple different aliases. - // - **GCP:** The [URI of the resource] - // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, - // *not* the function app, having the form - // - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` - // . - // This means that a span attribute MUST be used, as an Azure function app - // can host multiple functions that would usually share - // a TracerProvider. - // - // - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - // [full resource name]: https://cloud.google.com/apis/design/resource_names#full_resource_name - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html - // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id - CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" -// semantic conventions. It represents the geographical region the resource is -// running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] -// on GCP). 
-// -// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id -// [full resource name]: https://cloud.google.com/apis/design/resource_names#full_resource_name -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Enum values for cloud.platform -var ( - // Alibaba Cloud Elastic Compute Service - // Stability: development - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - // Stability: development - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - // Stability: development - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - // Stability: development - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - // Stability: development - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - // Stability: development - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - // Stability: development - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - // Stability: development - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - // Stability: development - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - // Stability: development - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - // Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - // Stability: development - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - // Stability: development - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - // Stability: development - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - // Stability: development - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - // Stability: development - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - // Stability: development - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - // Stability: development - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - // Stability: development - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google 
Cloud - // Stability: development - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - // Stability: development - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Compute on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") - // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudOke = CloudPlatformKey.String("oracle_cloud_oke") - // Tencent Cloud Cloud Virtual Machine (CVM) - // Stability: development - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - // Stability: development - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - // Stability: development - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// Enum values for cloud.provider -var ( - // Alibaba Cloud - // Stability: development - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - // Stability: development - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - // Stability: development - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - // Stability: development - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") - // Tencent Cloud - // Stability: development - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// Namespace: cloudevents -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the [event_id] - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" - // - // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the [source] - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", - // "my-service" - // - // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents specification] which the event uses. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - // - // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the [subject] - // of the event in the context of the event producer (identified by source). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: mynewfile.jpg - // - // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the [event_type] - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" - // - // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the [event_id] -// uniquely identifies the event. -// -// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the [source] -// identifies the context in which an event happened. -// -// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to the -// "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents specification] which the event uses. -// -// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the [subject] -// of the event in the context of the event producer (identified by source). -// -// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the [event_type] -// contains a value describing the type of event related to the originating -// occurrence. 
-// -// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Namespace: cloudfoundry -const ( - // CloudfoundryAppIDKey is the attribute Key conforming to the - // "cloudfoundry.app.id" semantic conventions. It represents the guid of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_id`. This is the same value as - // reported by `cf app --guid`. - CloudfoundryAppIDKey = attribute.Key("cloudfoundry.app.id") - - // CloudfoundryAppInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.app.instance.id" semantic conventions. It represents the index - // of the application instance. 0 when just one instance is active. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0", "1" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the application instance index for applications - // deployed on the runtime. - // - // Application instrumentation should use the value from environment - // variable `CF_INSTANCE_INDEX`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - CloudfoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") - - // CloudfoundryAppNameKey is the attribute Key conforming to the - // "cloudfoundry.app.name" semantic conventions. It represents the name of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-app-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_name`. This is the same value - // as reported by `cf apps`. - CloudfoundryAppNameKey = attribute.Key("cloudfoundry.app.name") - - // CloudfoundryOrgIDKey is the attribute Key conforming to the - // "cloudfoundry.org.id" semantic conventions. It represents the guid of the - // CloudFoundry org the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_id`. This is the same value as - // reported by `cf org --guid`. - CloudfoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") - - // CloudfoundryOrgNameKey is the attribute Key conforming to the - // "cloudfoundry.org.name" semantic conventions. It represents the name of the - // CloudFoundry organization the app is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_name`. This is the same value as - // reported by `cf orgs`. - CloudfoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") - - // CloudfoundryProcessIDKey is the attribute Key conforming to the - // "cloudfoundry.process.id" semantic conventions. It represents the UID - // identifying the process. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to - // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. - // For system components, this could be the actual PID. - CloudfoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") - - // CloudfoundryProcessTypeKey is the attribute Key conforming to the - // "cloudfoundry.process.type" semantic conventions. It represents the type of - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "web" - // Note: CloudFoundry applications can consist of multiple jobs. Usually the - // main process will be of type `web`. There can be additional background - // tasks or side-cars with different process types. - CloudfoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") - - // CloudfoundrySpaceIDKey is the attribute Key conforming to the - // "cloudfoundry.space.id" semantic conventions. It represents the guid of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_id`. This is the same value as - // reported by `cf space --guid`. - CloudfoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") - - // CloudfoundrySpaceNameKey is the attribute Key conforming to the - // "cloudfoundry.space.name" semantic conventions. It represents the name of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-space-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_name`. This is the same value as - // reported by `cf spaces`. - CloudfoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") - - // CloudfoundrySystemIDKey is the attribute Key conforming to the - // "cloudfoundry.system.id" semantic conventions. It represents a guid or - // another name describing the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cf/gorouter" - // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the component name, e.g. "gorouter", for - // CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.id` should be set to - // `spec.deployment/spec.name`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudfoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") - - // CloudfoundrySystemInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid - // describing the concrete instance of the event source. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the vm id for CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.instance.id` should be set to `spec.id`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudfoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") -) - -// CloudfoundryAppID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.id" semantic conventions. It represents the guid of the -// application. -func CloudfoundryAppID(val string) attribute.KeyValue { - return CloudfoundryAppIDKey.String(val) -} - -// CloudfoundryAppInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.instance.id" semantic conventions. It represents the index -// of the application instance. 0 when just one instance is active. -func CloudfoundryAppInstanceID(val string) attribute.KeyValue { - return CloudfoundryAppInstanceIDKey.String(val) -} - -// CloudfoundryAppName returns an attribute KeyValue conforming to the -// "cloudfoundry.app.name" semantic conventions. It represents the name of the -// application. -func CloudfoundryAppName(val string) attribute.KeyValue { - return CloudfoundryAppNameKey.String(val) -} - -// CloudfoundryOrgID returns an attribute KeyValue conforming to the -// "cloudfoundry.org.id" semantic conventions. It represents the guid of the -// CloudFoundry org the application is running in. -func CloudfoundryOrgID(val string) attribute.KeyValue { - return CloudfoundryOrgIDKey.String(val) -} - -// CloudfoundryOrgName returns an attribute KeyValue conforming to the -// "cloudfoundry.org.name" semantic conventions. It represents the name of the -// CloudFoundry organization the app is running in. -func CloudfoundryOrgName(val string) attribute.KeyValue { - return CloudfoundryOrgNameKey.String(val) -} - -// CloudfoundryProcessID returns an attribute KeyValue conforming to the -// "cloudfoundry.process.id" semantic conventions. It represents the UID -// identifying the process. -func CloudfoundryProcessID(val string) attribute.KeyValue { - return CloudfoundryProcessIDKey.String(val) -} - -// CloudfoundryProcessType returns an attribute KeyValue conforming to the -// "cloudfoundry.process.type" semantic conventions. It represents the type of -// process. -func CloudfoundryProcessType(val string) attribute.KeyValue { - return CloudfoundryProcessTypeKey.String(val) -} - -// CloudfoundrySpaceID returns an attribute KeyValue conforming to the -// "cloudfoundry.space.id" semantic conventions. It represents the guid of the -// CloudFoundry space the application is running in. -func CloudfoundrySpaceID(val string) attribute.KeyValue { - return CloudfoundrySpaceIDKey.String(val) -} - -// CloudfoundrySpaceName returns an attribute KeyValue conforming to the -// "cloudfoundry.space.name" semantic conventions. It represents the name of the -// CloudFoundry space the application is running in. 
-func CloudfoundrySpaceName(val string) attribute.KeyValue { - return CloudfoundrySpaceNameKey.String(val) -} - -// CloudfoundrySystemID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.id" semantic conventions. It represents a guid or another -// name describing the event source. -func CloudfoundrySystemID(val string) attribute.KeyValue { - return CloudfoundrySystemIDKey.String(val) -} - -// CloudfoundrySystemInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid -// describing the concrete instance of the event source. -func CloudfoundrySystemInstanceID(val string) attribute.KeyValue { - return CloudfoundrySystemInstanceIDKey.String(val) -} - -// Namespace: code -const ( - // CodeColumnNumberKey is the attribute Key conforming to the - // "code.column.number" semantic conventions. It represents the column number in - // `code.file.path` best representing the operation. It SHOULD point within the - // code unit named in `code.function.name`. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - CodeColumnNumberKey = attribute.Key("code.column.number") - - // CodeFilePathKey is the attribute Key conforming to the "code.file.path" - // semantic conventions. It represents the source code file name that identifies - // the code unit as uniquely as possible (preferably an absolute file path). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: /usr/local/MyApplication/content_root/app/index.php - CodeFilePathKey = attribute.Key("code.file.path") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the deprecated, use `code.file.path` - // instead. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: /usr/local/MyApplication/content_root/app/index.php - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionNameKey is the attribute Key conforming to the - // "code.function.name" semantic conventions. It represents the method or - // function name, or equivalent (usually rightmost part of the code unit's - // name). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: serveRequest - CodeFunctionNameKey = attribute.Key("code.function.name") - - // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" - // semantic conventions. It represents the line number in `code.file.path` best - // representing the operation. It SHOULD point within the code unit named in - // `code.function.name`. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - CodeLineNumberKey = attribute.Key("code.line.number") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function.name` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function.name` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: com.example.MyHttpService - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" - // semantic conventions. 
It represents a stacktrace as a string in the natural - // representation for the language runtime. The representation is to be - // determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumnNumber returns an attribute KeyValue conforming to the -// "code.column.number" semantic conventions. It represents the column number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. -func CodeColumnNumber(val int) attribute.KeyValue { - return CodeColumnNumberKey.Int(val) -} - -// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" -// semantic conventions. It represents the source code file name that identifies -// the code unit as uniquely as possible (preferably an absolute file path). -func CodeFilePath(val string) attribute.KeyValue { - return CodeFilePathKey.String(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the "code.filepath" -// semantic conventions. It represents the deprecated, use `code.file.path` -// instead. -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunctionName returns an attribute KeyValue conforming to the -// "code.function.name" semantic conventions. It represents the method or -// function name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunctionName(val string) attribute.KeyValue { - return CodeFunctionNameKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the -// "code.line.number" semantic conventions. It represents the line number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the "code.namespace" -// semantic conventions. It represents the "namespace" within which -// `code.function.name` is defined. Usually the qualified class or module name, -// such that `code.namespace` + some separator + `code.function.name` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a string -// in the natural representation for the language runtime. The representation is -// to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// Namespace: container -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used to - // run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol" - // Note: If using embedded credentials or sensitive data, it is recommended to - // remove them to prevent potential leakage. 
- ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol", "--config", "config.yaml" - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full command - // run by the container as a single string representing the full command. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol --config config.yaml" - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCsiPluginNameKey is the attribute Key conforming to the - // "container.csi.plugin.name" semantic conventions. It represents the name of - // the CSI ([Container Storage Interface]) plugin used by the volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pd.csi.storage.gke.io" - // Note: This can sometimes be referred to as a "driver" in CSI implementations. - // This should represent the `name` field of the GetPluginInfo RPC. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCsiPluginNameKey = attribute.Key("container.csi.plugin.name") - - // ContainerCsiVolumeIDKey is the attribute Key conforming to the - // "container.csi.volume.id" semantic conventions. It represents the unique - // volume ID returned by the CSI ([Container Storage Interface]) plugin. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" - // Note: This can sometimes be referred to as a "volume handle" in CSI - // implementations. This should represent the `Volume.volume_id` field in CSI - // spec. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCsiVolumeIDKey = attribute.Key("container.csi.volume.id") - - // ContainerIDKey is the attribute Key conforming to the "container.id" semantic - // conventions. It represents the container ID. Usually a UUID, as for example - // used to [identify Docker containers]. The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a3bf90e006b2" - // - // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime specific - // image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect [API] - // endpoint. 
- // K8s defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` - // . - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - // - // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of the - // image the container was built on. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gcr.io/opentelemetry/operator" - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the repo - // digests of the container image as provided by the container runtime. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - // Note: [Docker] and [CRI] report those under the `RepoDigests` field. - // - // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image Inspect]. Should be only - // the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v1.27.1", "3.5.7-0" - // - // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-autoconf" - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container runtime - // managing this container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker", "containerd", "rkt" - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). 
-func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full command -// run by the container as a single string representing the full command. -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerCsiPluginName returns an attribute KeyValue conforming to the -// "container.csi.plugin.name" semantic conventions. It represents the name of -// the CSI ([Container Storage Interface]) plugin used by the volume. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCsiPluginName(val string) attribute.KeyValue { - return ContainerCsiPluginNameKey.String(val) -} - -// ContainerCsiVolumeID returns an attribute KeyValue conforming to the -// "container.csi.volume.id" semantic conventions. It represents the unique -// volume ID returned by the CSI ([Container Storage Interface]) plugin. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCsiVolumeID(val string) attribute.KeyValue { - return ContainerCsiVolumeIDKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the "container.id" -// semantic conventions. It represents the container ID. Usually a UUID, as for -// example used to [identify Docker containers]. The UUID might be abbreviated. -// -// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime specific -// image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container image -// tags. An example can be found in [Docker Image Inspect]. Should be only the -// `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-// -// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the "container.name" -// semantic conventions. It represents the container name used by container -// runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container runtime -// managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Namespace: cpu -const ( - // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic - // conventions. It represents the mode of the CPU. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "user", "system" - CPUModeKey = attribute.Key("cpu.mode") -) - -// Enum values for cpu.mode -var ( - // user - // Stability: development - CPUModeUser = CPUModeKey.String("user") - // system - // Stability: development - CPUModeSystem = CPUModeKey.String("system") - // nice - // Stability: development - CPUModeNice = CPUModeKey.String("nice") - // idle - // Stability: development - CPUModeIdle = CPUModeKey.String("idle") - // iowait - // Stability: development - CPUModeIowait = CPUModeKey.String("iowait") - // interrupt - // Stability: development - CPUModeInterrupt = CPUModeKey.String("interrupt") - // steal - // Stability: development - CPUModeSteal = CPUModeKey.String("steal") - // kernel - // Stability: development - CPUModeKernel = CPUModeKey.String("kernel") -) - -// Namespace: db -const ( - // DBClientConnectionPoolNameKey is the attribute Key conforming to the - // "db.client.connection.pool.name" semantic conventions. It represents the name - // of the connection pool; unique within the instrumented application. In case - // the connection pool implementation doesn't provide a name, instrumentation - // SHOULD use a combination of parameters that would make the name unique, for - // example, combining attributes `server.address`, `server.port`, and - // `db.namespace`, formatted as `server.address:server.port/db.namespace`. - // Instrumentations that generate connection pool name following different - // patterns SHOULD document it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myDataSource" - DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") - - // DBClientConnectionStateKey is the attribute Key conforming to the - // "db.client.connection.state" semantic conventions. It represents the state of - // a connection in the pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle" - DBClientConnectionStateKey = attribute.Key("db.client.connection.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "public.users", "customers" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. 
- // - // The collection name SHOULD NOT be extracted from `db.query.text`, - // unless the query format is known to only ever have a single collection name - // present. - // - // For batch operations, if the individual operations are known to have the same - // collection name - // then that collection name SHOULD be used. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic - // conventions. It represents the name of the database, fully qualified within - // the server address and port. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "customers", "test.users" - // Note: If a database system has multiple namespace components, they SHOULD be - // concatenated (potentially using database system specific conventions) from - // most general to most specific namespace component, and more specific - // namespaces SHOULD NOT be captured without the more general namespaces, to - // ensure that "startswith" queries for the more general namespaces will be - // valid. - // Semantic conventions for individual database systems SHOULD document what - // `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application without - // attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationBatchSizeKey is the attribute Key conforming to the - // "db.operation.batch.size" semantic conventions. It represents the number of - // queries included in a batch operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: 2, 3, 4 - // Note: Operations are only considered batches when they contain two or more - // operations, and so `db.operation.batch.size` SHOULD never be `1`. - DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") - - // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" - // semantic conventions. It represents the name of the operation or command - // being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "findAndModify", "HMSET", "SELECT" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The operation name SHOULD NOT be extracted from `db.query.text`, - // unless the query format is known to only ever have a single operation name - // present. - // - // For batch operations, if the individual operations are known to have the same - // operation name - // then that operation name SHOULD be used prepended by `BATCH `, - // otherwise `db.operation.name` SHOULD be `BATCH` or some other database - // system specific term if more applicable. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" - // semantic conventions. It represents the low cardinality representation of a - // database query text. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get - // user by id" - // Note: `db.query.summary` provides static summary of the query text. 
It - // describes a class of database queries and is useful as a grouping key, - // especially when analyzing telemetry for database calls involving complex - // queries. - // Summary may be available to the instrumentation through instrumentation hooks - // or other means. If it is not available, instrumentations that support query - // parsing SHOULD generate a summary following [Generating query summary] - // section. - // - // [Generating query summary]: ../../docs/database/database-spans.md#generating-a-summary-of-the-query-text - DBQuerySummaryKey = attribute.Key("db.query.summary") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" - // Note: For sanitization see [Sanitization of `db.query.text`]. - // For batch operations, if the individual operations are known to have the same - // query text then that query text SHOULD be used, otherwise all of the - // individual query texts SHOULD be concatenated with separator `; ` or some - // other database system specific separator if more applicable. - // Even though parameterized query text can potentially have sensitive data, by - // using a parameterized query the user is giving a strong signal that any - // sensitive data will be passed as parameter values, and the benefit to - // observability of capturing the static part of the query text by default - // outweighs the risk. - // - // [Sanitization of `db.query.text`]: ../../docs/database/database-spans.md#sanitization-of-dbquerytext - DBQueryTextKey = attribute.Key("db.query.text") - - // DBResponseReturnedRowsKey is the attribute Key conforming to the - // "db.response.returned_rows" semantic conventions. It represents the number of - // rows returned by the operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10, 30, 1000 - DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") - - // DBResponseStatusCodeKey is the attribute Key conforming to the - // "db.response.status_code" semantic conventions. It represents the database - // response status code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: "102", "ORA-17002", "08P01", "404" - // Note: The status code returned by the database. Usually it represents an - // error code, but may also represent partial success, warning, or differentiate - // between various types of successful outcomes. - // Semantic conventions for individual database systems SHOULD document what - // `db.response.status_code` means in the context of that system. - DBResponseStatusCodeKey = attribute.Key("db.response.status_code") - - // DBSystemNameKey is the attribute Key conforming to the "db.system.name" - // semantic conventions. It represents the database management system (DBMS) - // product as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Release_Candidate - // - // Examples: - // Note: The actual DBMS may differ from the one identified by the client. For - // example, when using PostgreSQL client libraries to connect to a CockroachDB, - // the `db.system.name` is set to `postgresql` based on the instrumentation's - // best knowledge. 
- DBSystemNameKey = attribute.Key("db.system.name") -) - -// DBClientConnectionPoolName returns an attribute KeyValue conforming to the -// "db.client.connection.pool.name" semantic conventions. It represents the name -// of the connection pool; unique within the instrumented application. In case -// the connection pool implementation doesn't provide a name, instrumentation -// SHOULD use a combination of parameters that would make the name unique, for -// example, combining attributes `server.address`, `server.port`, and -// `db.namespace`, formatted as `server.address:server.port/db.namespace`. -// Instrumentations that generate connection pool name following different -// patterns SHOULD document it. -func DBClientConnectionPoolName(val string) attribute.KeyValue { - return DBClientConnectionPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" -// semantic conventions. It represents the name of the database, fully qualified -// within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationBatchSize returns an attribute KeyValue conforming to the -// "db.operation.batch.size" semantic conventions. It represents the number of -// queries included in a batch operation. -func DBOperationBatchSize(val int) attribute.KeyValue { - return DBOperationBatchSizeKey.Int(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBQuerySummary returns an attribute KeyValue conforming to the -// "db.query.summary" semantic conventions. It represents the low cardinality -// representation of a database query text. -func DBQuerySummary(val string) attribute.KeyValue { - return DBQuerySummaryKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" -// semantic conventions. It represents the database query being executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// DBResponseReturnedRows returns an attribute KeyValue conforming to the -// "db.response.returned_rows" semantic conventions. It represents the number of -// rows returned by the operation. -func DBResponseReturnedRows(val int) attribute.KeyValue { - return DBResponseReturnedRowsKey.Int(val) -} - -// DBResponseStatusCode returns an attribute KeyValue conforming to the -// "db.response.status_code" semantic conventions. It represents the database -// response status code. -func DBResponseStatusCode(val string) attribute.KeyValue { - return DBResponseStatusCodeKey.String(val) -} - -// Enum values for db.client.connection.state -var ( - // idle - // Stability: development - DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") - // used - // Stability: development - DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") -) - -// Enum values for db.system.name -var ( - // Some other SQL database. Fallback only. 
- // Stability: development - DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") - // [Adabas (Adaptable Database System)] - // Stability: development - // - // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas - DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") - // [Actian Ingres] - // Stability: development - // - // [Actian Ingres]: https://www.actian.com/databases/ingres/ - DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") - // [Amazon DynamoDB] - // Stability: development - // - // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ - DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") - // [Amazon Redshift] - // Stability: development - // - // [Amazon Redshift]: https://aws.amazon.com/redshift/ - DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") - // [Azure Cosmos DB] - // Stability: development - // - // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db - DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") - // [InterSystems Caché] - // Stability: development - // - // [InterSystems Caché]: https://www.intersystems.com/products/cache/ - DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") - // [Apache Cassandra] - // Stability: development - // - // [Apache Cassandra]: https://cassandra.apache.org/ - DBSystemNameCassandra = DBSystemNameKey.String("cassandra") - // [ClickHouse] - // Stability: development - // - // [ClickHouse]: https://clickhouse.com/ - DBSystemNameClickhouse = DBSystemNameKey.String("clickhouse") - // [CockroachDB] - // Stability: development - // - // [CockroachDB]: https://www.cockroachlabs.com/ - DBSystemNameCockroachdb = DBSystemNameKey.String("cockroachdb") - // [Couchbase] - // Stability: development - // - // [Couchbase]: https://www.couchbase.com/ - DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") - // [Apache CouchDB] - // Stability: development - // - // [Apache CouchDB]: https://couchdb.apache.org/ - DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") - // [Apache Derby] - // Stability: development - // - // [Apache Derby]: https://db.apache.org/derby/ - DBSystemNameDerby = DBSystemNameKey.String("derby") - // [Elasticsearch] - // Stability: development - // - // [Elasticsearch]: https://www.elastic.co/elasticsearch - DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") - // [Firebird] - // Stability: development - // - // [Firebird]: https://www.firebirdsql.org/ - DBSystemNameFirebirdsql = DBSystemNameKey.String("firebirdsql") - // [Google Cloud Spanner] - // Stability: development - // - // [Google Cloud Spanner]: https://cloud.google.com/spanner - DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") - // [Apache Geode] - // Stability: development - // - // [Apache Geode]: https://geode.apache.org/ - DBSystemNameGeode = DBSystemNameKey.String("geode") - // [H2 Database] - // Stability: development - // - // [H2 Database]: https://h2database.com/ - DBSystemNameH2database = DBSystemNameKey.String("h2database") - // [Apache HBase] - // Stability: development - // - // [Apache HBase]: https://hbase.apache.org/ - DBSystemNameHBase = DBSystemNameKey.String("hbase") - // [Apache Hive] - // Stability: development - // - // [Apache Hive]: https://hive.apache.org/ - DBSystemNameHive = DBSystemNameKey.String("hive") - // [HyperSQL Database] - // Stability: development - // - // [HyperSQL Database]: https://hsqldb.org/ - DBSystemNameHSQLDB = 
DBSystemNameKey.String("hsqldb") - // [IBM Db2] - // Stability: development - // - // [IBM Db2]: https://www.ibm.com/db2 - DBSystemNameIbmDb2 = DBSystemNameKey.String("ibm.db2") - // [IBM Informix] - // Stability: development - // - // [IBM Informix]: https://www.ibm.com/products/informix - DBSystemNameIbmInformix = DBSystemNameKey.String("ibm.informix") - // [IBM Netezza] - // Stability: development - // - // [IBM Netezza]: https://www.ibm.com/products/netezza - DBSystemNameIbmNetezza = DBSystemNameKey.String("ibm.netezza") - // [InfluxDB] - // Stability: development - // - // [InfluxDB]: https://www.influxdata.com/ - DBSystemNameInfluxdb = DBSystemNameKey.String("influxdb") - // [Instant] - // Stability: development - // - // [Instant]: https://www.instantdb.com/ - DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") - // [MariaDB] - // Stability: release_candidate - // - // [MariaDB]: https://mariadb.org/ - DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") - // [Memcached] - // Stability: development - // - // [Memcached]: https://memcached.org/ - DBSystemNameMemcached = DBSystemNameKey.String("memcached") - // [MongoDB] - // Stability: development - // - // [MongoDB]: https://www.mongodb.com/ - DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") - // [Microsoft SQL Server] - // Stability: release_candidate - // - // [Microsoft SQL Server]: https://www.microsoft.com/sql-server - DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") - // [MySQL] - // Stability: release_candidate - // - // [MySQL]: https://www.mysql.com/ - DBSystemNameMySQL = DBSystemNameKey.String("mysql") - // [Neo4j] - // Stability: development - // - // [Neo4j]: https://neo4j.com/ - DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") - // [OpenSearch] - // Stability: development - // - // [OpenSearch]: https://opensearch.org/ - DBSystemNameOpensearch = DBSystemNameKey.String("opensearch") - // [Oracle Database] - // Stability: development - // - // [Oracle Database]: https://www.oracle.com/database/ - DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") - // [PostgreSQL] - // Stability: release_candidate - // - // [PostgreSQL]: https://www.postgresql.org/ - DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") - // [Redis] - // Stability: development - // - // [Redis]: https://redis.io/ - DBSystemNameRedis = DBSystemNameKey.String("redis") - // [SAP HANA] - // Stability: development - // - // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html - DBSystemNameSapHana = DBSystemNameKey.String("sap.hana") - // [SAP MaxDB] - // Stability: development - // - // [SAP MaxDB]: https://maxdb.sap.com/ - DBSystemNameSapMaxDB = DBSystemNameKey.String("sap.maxdb") - // [SQLite] - // Stability: development - // - // [SQLite]: https://www.sqlite.org/ - DBSystemNameSqlite = DBSystemNameKey.String("sqlite") - // [Teradata] - // Stability: development - // - // [Teradata]: https://www.teradata.com/ - DBSystemNameTeradata = DBSystemNameKey.String("teradata") - // [Trino] - // Stability: development - // - // [Trino]: https://trino.io/ - DBSystemNameTrino = DBSystemNameKey.String("trino") -) - -// Namespace: deployment -const ( - // DeploymentEnvironmentNameKey is the attribute Key conforming to the - // "deployment.environment.name" semantic conventions. It represents the name of - // the [deployment environment] (aka deployment tier). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "staging", "production" - // Note: `deployment.environment.name` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` resource - // attributes. - // This implies that resources carrying the following attribute combinations - // MUST be - // considered to be identifying the same service: - // - // - `service.name=frontend`, `deployment.environment.name=production` - // - `service.name=frontend`, `deployment.environment.name=staging`. - // - // - // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment - DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") - - // DeploymentIDKey is the attribute Key conforming to the "deployment.id" - // semantic conventions. It represents the id of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1208" - DeploymentIDKey = attribute.Key("deployment.id") - - // DeploymentNameKey is the attribute Key conforming to the "deployment.name" - // semantic conventions. It represents the name of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "deploy my app", "deploy-frontend" - DeploymentNameKey = attribute.Key("deployment.name") - - // DeploymentStatusKey is the attribute Key conforming to the - // "deployment.status" semantic conventions. It represents the status of the - // deployment. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - DeploymentStatusKey = attribute.Key("deployment.status") -) - -// DeploymentEnvironmentName returns an attribute KeyValue conforming to the -// "deployment.environment.name" semantic conventions. It represents the name of -// the [deployment environment] (aka deployment tier). -// -// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment -func DeploymentEnvironmentName(val string) attribute.KeyValue { - return DeploymentEnvironmentNameKey.String(val) -} - -// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" -// semantic conventions. It represents the id of the deployment. -func DeploymentID(val string) attribute.KeyValue { - return DeploymentIDKey.String(val) -} - -// DeploymentName returns an attribute KeyValue conforming to the -// "deployment.name" semantic conventions. It represents the name of the -// deployment. -func DeploymentName(val string) attribute.KeyValue { - return DeploymentNameKey.String(val) -} - -// Enum values for deployment.status -var ( - // failed - // Stability: development - DeploymentStatusFailed = DeploymentStatusKey.String("failed") - // succeeded - // Stability: development - DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") -) - -// Namespace: destination -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the destination - // address - domain name if available without reverse DNS lookup; otherwise, IP - // address or Unix domain socket name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the source side, and when communicating through an - // intermediary, `destination.address` SHOULD represent the destination address - // behind any intermediaries, for example proxies, if it's available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the "destination.port" - // semantic conventions. It represents the destination port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number. -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Namespace: device -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the - // [vendor identifier]. On Android (Java or Kotlin), this value MUST be equal to - // the Firebase Installation ID or a globally unique UUID which is persisted - // across sessions in your application. More information can be found [here] on - // best practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - // - // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor - // [here]: https://developer.android.com/training/articles/user-data-ids - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of the - // device manufacturer. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apple", "Samsung" - // Note: The Android OS provides this field via [Build]. iOS apps SHOULD - // hardcode the value `Apple`. - // - // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone3,4", "SM-G920F" - // Note: It's recommended this value represents a machine-readable version of - // the model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" - // semantic conventions. It represents the marketing name for the device model. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" - // Note: It's recommended this value represents a human-readable version of the - // device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic -// conventions. It represents a unique identifier representing the device. -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer. -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device. -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name for -// the device model. -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// Namespace: disk -const ( - // DiskIoDirectionKey is the attribute Key conforming to the "disk.io.direction" - // semantic conventions. It represents the disk IO operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "read" - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -// Enum values for disk.io.direction -var ( - // read - // Stability: development - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - // Stability: development - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// Namespace: dns -const ( - // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" - // semantic conventions. It represents the name being queried. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.example.com", "opentelemetry.io" - // Note: If the name field contains non-printable characters (below 32 or above - // 126), those characters should be represented as escaped base 10 integers - // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, - // and line feeds should be converted to \t, \r, and \n respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. 
-func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Namespace: elasticsearch -const ( - // ElasticsearchNodeNameKey is the attribute Key conforming to the - // "elasticsearch.node.name" semantic conventions. It represents the represents - // the human-readable identifier of the node/instance to which a request was - // routed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-0000000001" - ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") -) - -// ElasticsearchNodeName returns an attribute KeyValue conforming to the -// "elasticsearch.node.name" semantic conventions. It represents the represents -// the human-readable identifier of the node/instance to which a request was -// routed. -func ElasticsearchNodeName(val string) attribute.KeyValue { - return ElasticsearchNodeNameKey.String(val) -} - -// Namespace: error -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic - // conventions. It represents the describes a class of error the operation ended - // with. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "timeout", "java.net.UnknownHostException", - // "server_certificate_invalid", "500" - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library SHOULD be - // low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query time - // when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT set - // `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as HTTP - // or gRPC status codes), - // it's RECOMMENDED to: - // - // - Use a domain-specific attribute - // - Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -// Enum values for error.type -var ( - // A fallback error value to be used when the instrumentation doesn't define a - // custom value. - // - // Stability: stable - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Namespace: exception -const ( - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: Exception in thread "main" java.lang.RuntimeException: Test - // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the exception - // should be preferred over the static type in languages that support it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "java.net.ConnectException", "OSError" - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the "exception.type" -// semantic conventions. It represents the type of the exception (its -// fully-qualified class name, if applicable). The dynamic type of the exception -// should be preferred over the static type in languages that support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Namespace: faas -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the serverless - // function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron Expression]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0/5 * * * ? * - // - // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name of - // the source on which the triggering operation was performed. For example, in - // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the - // database name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myBucketName", "myDbName" - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. 
It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or S3 is - // the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myFile.txt", "myTableName" - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the describes - // the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string containing - // the time when the data was accessed in the [ISO 8601] format expressed in - // [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a string, - // that will be potentially reused for other invocations to the same - // function/function version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" - // Note: - **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation ID of - // the current function invocation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" - // semantic conventions. It represents the name of the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: my-function - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud region of - // the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: eu-central-1 - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. 
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" - // semantic conventions. It represents the amount of memory available to the - // serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information (which must be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this runtime - // instance executes. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-function", "myazurefunctionapp/some-function-name" - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function.name`] - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - // - // - **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - // - // - // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation time - // in the [ISO 8601] format expressed in [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic - // conventions. It represents the type of the trigger which caused this function - // invocation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic - // conventions. It represents the immutable version of the function being - // executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "26", "pinkfroid-00002" - // Note: Depending on the cloud provider and platform, use: - // - // - **AWS Lambda:** The [function version] - // (an integer represented as a decimal string). - // - **Google Cloud Run (Services):** The [revision] - // (i.e., the function name plus the revision suffix). 
- // - **Google Cloud Functions:** The value of the - // [`K_REVISION` environment variable]. - // - **Azure Functions:** Not applicable. Do not set this attribute. - // - // - // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html - // [revision]: https://cloud.google.com/run/docs/managing/revisions - // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" -// semantic conventions. It represents a boolean that is true if the serverless -// function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic -// conventions. It represents a string containing the schedule period as -// [Cron Expression]. -// -// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of the -// source on which the triggering operation was performed. For example, in Cloud -// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database -// name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 is -// the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO 8601] format expressed in -// [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" -// semantic conventions. It represents the execution environment ID as a string, -// that will be potentially reused for other invocations to the same -// function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID of -// the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. 
It represents the cloud region of -// the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic -// conventions. It represents the name of the single function that this runtime -// instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic -// conventions. It represents a string containing the function invocation time in -// the [ISO 8601] format expressed in [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" -// semantic conventions. It represents the immutable version of the function -// being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Enum values for faas.document.operation -var ( - // When a new object is created. - // Stability: development - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified. - // Stability: development - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted. - // Stability: development - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Enum values for faas.invoked_provider -var ( - // Alibaba Cloud - // Stability: development - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - // Stability: development - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - // Stability: development - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// Enum values for faas.trigger -var ( - // A response to some data source operation such as a database or filesystem - // read/write - // Stability: development - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - // Stability: development - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - // Stability: development - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - // Stability: development - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - // Stability: development - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Namespace: feature_flag -const ( - // FeatureFlagContextIDKey is the attribute Key conforming to the - // "feature_flag.context.id" semantic conventions. It represents the unique - // identifier for the flag evaluation context. 
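// Below is a minimal usage sketch for the faas.* helpers defined above. The
// semconv import alias/version and the function name are illustrative
// assumptions, not part of this file; only the standard
// go.opentelemetry.io/otel tracer API is used.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.30.0" // assumed version
//	)
//
//	// invoke is a hypothetical handler that annotates its span with FaaS attributes.
//	func invoke(ctx context.Context, coldStart bool) {
//		_, span := otel.Tracer("faas-example").Start(ctx, "my-function")
//		defer span.End()
//
//		span.SetAttributes(
//			semconv.FaaSTriggerHTTP,                   // enum value for faas.trigger, not a call
//			semconv.FaaSInvokedName("my-function"),    // faas.invoked_name
//			semconv.FaaSInvokedRegion("eu-central-1"), // faas.invoked_region
//			semconv.FaaSColdstart(coldStart),          // faas.coldstart
//		)
//	}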
For example, the targeting key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" - FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") - - // FeatureFlagEvaluationErrorMessageKey is the attribute Key conforming to the - // "feature_flag.evaluation.error.message" semantic conventions. It represents a - // message explaining the nature of an error occurring during flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flag `header-color` expected type `string` but found type `number` - // " - FeatureFlagEvaluationErrorMessageKey = attribute.Key("feature_flag.evaluation.error.message") - - // FeatureFlagEvaluationReasonKey is the attribute Key conforming to the - // "feature_flag.evaluation.reason" semantic conventions. It represents the - // reason code which shows how a feature flag value was determined. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "static", "targeting_match", "error", "default" - FeatureFlagEvaluationReasonKey = attribute.Key("feature_flag.evaluation.reason") - - // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" - // semantic conventions. It represents the lookup key of the feature flag. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logo-color" - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // identifies the feature flag provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flag Manager" - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagSetIDKey is the attribute Key conforming to the - // "feature_flag.set.id" semantic conventions. It represents the identifier of - // the [flag set] to which the feature flag belongs. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "proj-1", "ab98sgs", "service1/dev" - // - // [flag set]: https://openfeature.dev/specification/glossary/#flag-set - FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents a semantic - // identifier for an evaluated flag value. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "red", "true", "on" - // Note: A semantic identifier, commonly referred to as a variant, provides a - // means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") - - // FeatureFlagVersionKey is the attribute Key conforming to the - // "feature_flag.version" semantic conventions. It represents the version of the - // ruleset used during the evaluation. This may be any stable value which - // uniquely identifies the ruleset. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "01ABCDEF" - FeatureFlagVersionKey = attribute.Key("feature_flag.version") -) - -// FeatureFlagContextID returns an attribute KeyValue conforming to the -// "feature_flag.context.id" semantic conventions. It represents the unique -// identifier for the flag evaluation context. For example, the targeting key. -func FeatureFlagContextID(val string) attribute.KeyValue { - return FeatureFlagContextIDKey.String(val) -} - -// FeatureFlagEvaluationErrorMessage returns an attribute KeyValue conforming to -// the "feature_flag.evaluation.error.message" semantic conventions. It -// represents a message explaining the nature of an error occurring during flag -// evaluation. -func FeatureFlagEvaluationErrorMessage(val string) attribute.KeyValue { - return FeatureFlagEvaluationErrorMessageKey.String(val) -} - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the lookup key of the -// feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the -// identifies the feature flag provider. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagSetID returns an attribute KeyValue conforming to the -// "feature_flag.set.id" semantic conventions. It represents the identifier of -// the [flag set] to which the feature flag belongs. -// -// [flag set]: https://openfeature.dev/specification/glossary/#flag-set -func FeatureFlagSetID(val string) attribute.KeyValue { - return FeatureFlagSetIDKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents a semantic -// identifier for an evaluated flag value. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// FeatureFlagVersion returns an attribute KeyValue conforming to the -// "feature_flag.version" semantic conventions. It represents the version of the -// ruleset used during the evaluation. This may be any stable value which -// uniquely identifies the ruleset. -func FeatureFlagVersion(val string) attribute.KeyValue { - return FeatureFlagVersionKey.String(val) -} - -// Enum values for feature_flag.evaluation.reason -var ( - // The resolved value is static (no dynamic evaluation). - // Stability: development - FeatureFlagEvaluationReasonStatic = FeatureFlagEvaluationReasonKey.String("static") - // The resolved value fell back to a pre-configured value (no dynamic evaluation - // occurred or dynamic evaluation yielded no result). - // Stability: development - FeatureFlagEvaluationReasonDefault = FeatureFlagEvaluationReasonKey.String("default") - // The resolved value was the result of a dynamic evaluation, such as a rule or - // specific user-targeting. - // Stability: development - FeatureFlagEvaluationReasonTargetingMatch = FeatureFlagEvaluationReasonKey.String("targeting_match") - // The resolved value was the result of pseudorandom assignment. - // Stability: development - FeatureFlagEvaluationReasonSplit = FeatureFlagEvaluationReasonKey.String("split") - // The resolved value was retrieved from cache. 
- // Stability: development - FeatureFlagEvaluationReasonCached = FeatureFlagEvaluationReasonKey.String("cached") - // The resolved value was the result of the flag being disabled in the - // management system. - // Stability: development - FeatureFlagEvaluationReasonDisabled = FeatureFlagEvaluationReasonKey.String("disabled") - // The reason for the resolved value could not be determined. - // Stability: development - FeatureFlagEvaluationReasonUnknown = FeatureFlagEvaluationReasonKey.String("unknown") - // The resolved value is non-authoritative or possibly out of date - // Stability: development - FeatureFlagEvaluationReasonStale = FeatureFlagEvaluationReasonKey.String("stale") - // The resolved value was the result of an error. - // Stability: development - FeatureFlagEvaluationReasonError = FeatureFlagEvaluationReasonKey.String("error") -) - -// Namespace: file -const ( - // FileAccessedKey is the attribute Key conforming to the "file.accessed" - // semantic conventions. It represents the time when the file was last accessed, - // in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileAccessedKey = attribute.Key("file.accessed") - - // FileAttributesKey is the attribute Key conforming to the "file.attributes" - // semantic conventions. It represents the array of file attributes. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "readonly", "hidden" - // Note: Attributes names depend on the OS or file system. Here’s a - // non-exhaustive list of values expected for this attribute: `archive`, - // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, - // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, - // `write`. - FileAttributesKey = attribute.Key("file.attributes") - - // FileChangedKey is the attribute Key conforming to the "file.changed" semantic - // conventions. It represents the time when the file attributes or metadata was - // last changed, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: `file.changed` captures the time when any of the file's properties or - // attributes (including the content) are changed, while `file.modified` - // captures the timestamp when the file content is modified. - FileChangedKey = attribute.Key("file.changed") - - // FileCreatedKey is the attribute Key conforming to the "file.created" semantic - // conventions. It represents the time when the file was created, in ISO 8601 - // format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileCreatedKey = attribute.Key("file.created") - - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is located. - // It should include the drive letter, when appropriate. 
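// A short sketch of how the feature_flag.* helpers above might be recorded
// during a flag evaluation; the function name and values are illustrative, and
// imports are as in the FaaS sketch earlier (context, go.opentelemetry.io/otel,
// an assumed semconv alias).
//
//	func recordFlagEvaluation(ctx context.Context) {
//		_, span := otel.Tracer("flags").Start(ctx, "feature_flag.evaluation")
//		defer span.End()
//
//		span.SetAttributes(
//			semconv.FeatureFlagKey("logo-color"),            // feature_flag.key
//			semconv.FeatureFlagProviderName("Flag Manager"), // feature_flag.provider_name
//			semconv.FeatureFlagVariant("red"),               // evaluated variant
//			semconv.FeatureFlagEvaluationReasonStatic,       // enum value, not a call
//		)
//	}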
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/user", "C:\Program Files\MyApp" - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the leading - // dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: When the file name has multiple extensions (example.tar.gz), only the - // last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileForkNameKey is the attribute Key conforming to the "file.fork_name" - // semantic conventions. It represents the name of the fork. A fork is - // additional data associated with a filesystem object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Zone.Identifer" - // Note: On Linux, a resource fork is used to store additional data with a - // filesystem object. A file always has at least one fork for the data portion, - // and additional forks may exist. - // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default - // data stream for a file is just called $DATA. Zone.Identifier is commonly used - // by Windows to track contents downloaded from the Internet. An ADS is - // typically of the form: C:\path\to\filename.extension:some_fork_name, and - // some_fork_name is the value that should populate `fork_name`. - // `filename.extension` should populate `file.name`, and `extension` should - // populate `file.extension`. The full path, `file.path`, will include the fork - // name. - FileForkNameKey = attribute.Key("file.fork_name") - - // FileGroupIDKey is the attribute Key conforming to the "file.group.id" - // semantic conventions. It represents the primary Group ID (GID) of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileGroupIDKey = attribute.Key("file.group.id") - - // FileGroupNameKey is the attribute Key conforming to the "file.group.name" - // semantic conventions. It represents the primary group name of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "users" - FileGroupNameKey = attribute.Key("file.group.name") - - // FileInodeKey is the attribute Key conforming to the "file.inode" semantic - // conventions. It represents the inode representing the file in the filesystem. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "256383" - FileInodeKey = attribute.Key("file.inode") - - // FileModeKey is the attribute Key conforming to the "file.mode" semantic - // conventions. It represents the mode of the file in octal representation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0640" - FileModeKey = attribute.Key("file.mode") - - // FileModifiedKey is the attribute Key conforming to the "file.modified" - // semantic conventions. It represents the time when the file content was last - // modified, in ISO 8601 format. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - FileModifiedKey = attribute.Key("file.modified") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.png" - FileNameKey = attribute.Key("file.name") - - // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" - // semantic conventions. It represents the user ID (UID) or security identifier - // (SID) of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileOwnerIDKey = attribute.Key("file.owner.id") - - // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" - // semantic conventions. It represents the username of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - FileOwnerNameKey = attribute.Key("file.owner.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FileSizeKey = attribute.Key("file.size") - - // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the - // "file.symbolic_link.target_path" semantic conventions. It represents the path - // to the target of a symbolic link. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/python3" - // Note: This attribute is only applicable to symbolic links. - FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") -) - -// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" -// semantic conventions. It represents the time when the file was last accessed, -// in ISO 8601 format. -func FileAccessed(val string) attribute.KeyValue { - return FileAccessedKey.String(val) -} - -// FileAttributes returns an attribute KeyValue conforming to the -// "file.attributes" semantic conventions. It represents the array of file -// attributes. -func FileAttributes(val ...string) attribute.KeyValue { - return FileAttributesKey.StringSlice(val) -} - -// FileChanged returns an attribute KeyValue conforming to the "file.changed" -// semantic conventions. It represents the time when the file attributes or -// metadata was last changed, in ISO 8601 format. -func FileChanged(val string) attribute.KeyValue { - return FileChangedKey.String(val) -} - -// FileCreated returns an attribute KeyValue conforming to the "file.created" -// semantic conventions. It represents the time when the file was created, in ISO -// 8601 format. 
-func FileCreated(val string) attribute.KeyValue { - return FileCreatedKey.String(val) -} - -// FileDirectory returns an attribute KeyValue conforming to the "file.directory" -// semantic conventions. It represents the directory where the file is located. -// It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the "file.extension" -// semantic conventions. It represents the file extension, excluding the leading -// dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" -// semantic conventions. It represents the name of the fork. A fork is additional -// data associated with a filesystem object. -func FileForkName(val string) attribute.KeyValue { - return FileForkNameKey.String(val) -} - -// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" -// semantic conventions. It represents the primary Group ID (GID) of the file. -func FileGroupID(val string) attribute.KeyValue { - return FileGroupIDKey.String(val) -} - -// FileGroupName returns an attribute KeyValue conforming to the -// "file.group.name" semantic conventions. It represents the primary group name -// of the file. -func FileGroupName(val string) attribute.KeyValue { - return FileGroupNameKey.String(val) -} - -// FileInode returns an attribute KeyValue conforming to the "file.inode" -// semantic conventions. It represents the inode representing the file in the -// filesystem. -func FileInode(val string) attribute.KeyValue { - return FileInodeKey.String(val) -} - -// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic -// conventions. It represents the mode of the file in octal representation. -func FileMode(val string) attribute.KeyValue { - return FileModeKey.String(val) -} - -// FileModified returns an attribute KeyValue conforming to the "file.modified" -// semantic conventions. It represents the time when the file content was last -// modified, in ISO 8601 format. -func FileModified(val string) attribute.KeyValue { - return FileModifiedKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" semantic -// conventions. It represents the name of the file including the extension, -// without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" -// semantic conventions. It represents the user ID (UID) or security identifier -// (SID) of the file owner. -func FileOwnerID(val string) attribute.KeyValue { - return FileOwnerIDKey.String(val) -} - -// FileOwnerName returns an attribute KeyValue conforming to the -// "file.owner.name" semantic conventions. It represents the username of the file -// owner. -func FileOwnerName(val string) attribute.KeyValue { - return FileOwnerNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" semantic -// conventions. It represents the full path to the file, including the file name. -// It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" semantic -// conventions. It represents the file size in bytes. 
-func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the -// "file.symbolic_link.target_path" semantic conventions. It represents the path -// to the target of a symbolic link. -func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { - return FileSymbolicLinkTargetPathKey.String(val) -} - -// Namespace: gcp -const ( - // GCPClientServiceKey is the attribute Key conforming to the - // "gcp.client.service" semantic conventions. It represents the identifies the - // Google Cloud service for which the official client library is intended. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "appengine", "run", "firestore", "alloydb", "spanner" - // Note: Intended to be a stable identifier for Google Cloud client libraries - // that is uniform across implementation languages. The value should be derived - // from the canonical service domain for the service; for example, - // 'foo.googleapis.com' should result in a value of 'foo'. - GCPClientServiceKey = attribute.Key("gcp.client.service") - - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of - // the Cloud Run [execution] being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`] environment variable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "job-name-xxxx", "sample-job-mdw84" - // - // [execution]: https://cloud.google.com/run/docs/managing/job-executions - // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index - // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] - // environment variable. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1 - // - // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") - - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname - // of a GCE instance. This is the full value of the default or [custom hostname] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-host1234.example.com", - // "sample-vm.us-west1-b.c.my-project.internal" - // - // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance name - // of a GCE instance. This is the value provided by `host.name`, the visible - // name of the instance in the Cloud Console UI, and the prefix for the default - // hostname of the instance as defined by the [default internal DNS name]. 
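// A sketch of populating the file.* attributes above from os.Stat; the
// function name is illustrative, and in addition to the imports from the
// earlier sketches it assumes "os", "path/filepath" and "strings".
//
//	func recordFile(ctx context.Context, path string) error {
//		_, span := otel.Tracer("files").Start(ctx, "file.read")
//		defer span.End()
//
//		info, err := os.Stat(path)
//		if err != nil {
//			return err
//		}
//		span.SetAttributes(
//			semconv.FilePath(path),                // file.path, full path including the name
//			semconv.FileName(filepath.Base(path)), // file.name
//			// file.extension excludes the leading dot, hence the TrimPrefix.
//			semconv.FileExtension(strings.TrimPrefix(filepath.Ext(path), ".")),
//			semconv.FileSize(int(info.Size())),    // file.size in bytes
//		)
//		return nil
//	}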
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-1", "my-vm-name" - // - // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPClientService returns an attribute KeyValue conforming to the -// "gcp.client.service" semantic conventions. It represents the identifies the -// Google Cloud service for which the official client library is intended. -func GCPClientService(val string) attribute.KeyValue { - return GCPClientServiceKey.String(val) -} - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of -// the Cloud Run [execution] being run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`] environment variable. -// -// [execution]: https://cloud.google.com/run/docs/managing/job-executions -// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] -// environment variable. -// -// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom hostname] -// . -// -// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance name -// of a GCE instance. This is the value provided by `host.name`, the visible name -// of the instance in the Cloud Console UI, and the prefix for the default -// hostname of the instance as defined by the [default internal DNS name]. -// -// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Namespace: gen_ai -const ( - // GenAIOpenaiRequestResponseFormatKey is the attribute Key conforming to the - // "gen_ai.openai.request.response_format" semantic conventions. It represents - // the response format that is requested. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "json" - GenAIOpenaiRequestResponseFormatKey = attribute.Key("gen_ai.openai.request.response_format") - - // GenAIOpenaiRequestServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.request.service_tier" semantic conventions. It represents the - // service tier requested. May be a specific tier, default, or auto. 
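// A sketch of deriving the Cloud Run job attributes above from the documented
// CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables; the
// helper name is illustrative and it additionally assumes "os", "strconv" and
// "go.opentelemetry.io/otel/attribute".
//
//	func cloudRunJobAttrs() []attribute.KeyValue {
//		idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")) // falls back to 0 on parse failure
//		return []attribute.KeyValue{
//			semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")), // gcp.cloud_run.job.execution
//			semconv.GCPCloudRunJobTaskIndex(idx),                              // gcp.cloud_run.job.task_index
//		}
//	}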
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "auto", "default" - GenAIOpenaiRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier") - - // GenAIOpenaiResponseServiceTierKey is the attribute Key conforming to the - // "gen_ai.openai.response.service_tier" semantic conventions. It represents the - // service tier used for the response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "scale", "default" - GenAIOpenaiResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier") - - // GenAIOpenaiResponseSystemFingerprintKey is the attribute Key conforming to - // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It - // represents a fingerprint to track any eventual change in the Generative AI - // environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fp_44709d6fcb" - GenAIOpenaiResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint") - - // GenAIOperationNameKey is the attribute Key conforming to the - // "gen_ai.operation.name" semantic conventions. It represents the name of the - // operation being performed. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If one of the predefined values applies, but specific system uses a - // different name it's RECOMMENDED to document it in the semantic conventions - // for specific GenAI system and use system-specific name in the - // instrumentation. If a different name is not documented, instrumentation - // libraries SHOULD use applicable predefined value. - GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") - - // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the - // "gen_ai.request.encoding_formats" semantic conventions. It represents the - // encoding formats requested in an embeddings operation, if specified. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "base64"], ["float", "binary" - // Note: In some GenAI systems the encoding formats are called embedding types. - // Also, some GenAI systems only accept a single format per request. - GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") - - // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the - // "gen_ai.request.frequency_penalty" semantic conventions. It represents the - // frequency penalty setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") - - // GenAIRequestMaxTokensKey is the attribute Key conforming to the - // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum - // number of tokens the model generates for a request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") - - // GenAIRequestModelKey is the attribute Key conforming to the - // "gen_ai.request.model" semantic conventions. It represents the name of the - // GenAI model a request is being made to. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: gpt-4 - GenAIRequestModelKey = attribute.Key("gen_ai.request.model") - - // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the - // "gen_ai.request.presence_penalty" semantic conventions. It represents the - // presence penalty setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.1 - GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") - - // GenAIRequestSeedKey is the attribute Key conforming to the - // "gen_ai.request.seed" semantic conventions. It represents the requests with - // same seed value more likely to return same result. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") - - // GenAIRequestStopSequencesKey is the attribute Key conforming to the - // "gen_ai.request.stop_sequences" semantic conventions. It represents the list - // of sequences that the model will use to stop generating further tokens. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "forest", "lived" - GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") - - // GenAIRequestTemperatureKey is the attribute Key conforming to the - // "gen_ai.request.temperature" semantic conventions. It represents the - // temperature setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0.0 - GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") - - // GenAIRequestTopKKey is the attribute Key conforming to the - // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling - // setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") - - // GenAIRequestTopPKey is the attribute Key conforming to the - // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling - // setting for the GenAI request. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") - - // GenAIResponseFinishReasonsKey is the attribute Key conforming to the - // "gen_ai.response.finish_reasons" semantic conventions. It represents the - // array of reasons the model stopped generating tokens, corresponding to each - // generation received. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "stop"], ["stop", "length" - GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") - - // GenAIResponseIDKey is the attribute Key conforming to the - // "gen_ai.response.id" semantic conventions. It represents the unique - // identifier for the completion. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "chatcmpl-123" - GenAIResponseIDKey = attribute.Key("gen_ai.response.id") - - // GenAIResponseModelKey is the attribute Key conforming to the - // "gen_ai.response.model" semantic conventions. It represents the name of the - // model that generated the response. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gpt-4-0613" - GenAIResponseModelKey = attribute.Key("gen_ai.response.model") - - // GenAISystemKey is the attribute Key conforming to the "gen_ai.system" - // semantic conventions. It represents the Generative AI product as identified - // by the client or server instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: openai - // Note: The `gen_ai.system` describes a family of GenAI models with specific - // model identified - // by `gen_ai.request.model` and `gen_ai.response.model` attributes. - // - // The actual GenAI product may differ from the one identified by the client. - // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI - // client - // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the - // instrumentation's best knowledge, instead of the actual system. The - // `server.address` - // attribute may help identify the actual system in use for `openai`. - // - // For custom model, a custom friendly name SHOULD be used. - // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER` - // . - GenAISystemKey = attribute.Key("gen_ai.system") - - // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" - // semantic conventions. It represents the type of token being counted. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "input", "output" - GenAITokenTypeKey = attribute.Key("gen_ai.token.type") - - // GenAIUsageInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of - // tokens used in the GenAI input (prompt). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") - - // GenAIUsageOutputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.output_tokens" semantic conventions. It represents the number - // of tokens used in the GenAI response (completion). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 180 - GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") -) - -// GenAIOpenaiResponseServiceTier returns an attribute KeyValue conforming to the -// "gen_ai.openai.response.service_tier" semantic conventions. It represents the -// service tier used for the response. -func GenAIOpenaiResponseServiceTier(val string) attribute.KeyValue { - return GenAIOpenaiResponseServiceTierKey.String(val) -} - -// GenAIOpenaiResponseSystemFingerprint returns an attribute KeyValue conforming -// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It -// represents a fingerprint to track any eventual change in the Generative AI -// environment. -func GenAIOpenaiResponseSystemFingerprint(val string) attribute.KeyValue { - return GenAIOpenaiResponseSystemFingerprintKey.String(val) -} - -// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the -// "gen_ai.request.encoding_formats" semantic conventions. It represents the -// encoding formats requested in an embeddings operation, if specified. 
-func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { - return GenAIRequestEncodingFormatsKey.StringSlice(val) -} - -// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.frequency_penalty" semantic conventions. It represents the -// frequency penalty setting for the GenAI request. -func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { - return GenAIRequestFrequencyPenaltyKey.Float64(val) -} - -// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the model generates for a request. -func GenAIRequestMaxTokens(val int) attribute.KeyValue { - return GenAIRequestMaxTokensKey.Int(val) -} - -// GenAIRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// GenAI model a request is being made to. -func GenAIRequestModel(val string) attribute.KeyValue { - return GenAIRequestModelKey.String(val) -} - -// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.presence_penalty" semantic conventions. It represents the -// presence penalty setting for the GenAI request. -func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { - return GenAIRequestPresencePenaltyKey.Float64(val) -} - -// GenAIRequestSeed returns an attribute KeyValue conforming to the -// "gen_ai.request.seed" semantic conventions. It represents the requests with -// same seed value more likely to return same result. -func GenAIRequestSeed(val int) attribute.KeyValue { - return GenAIRequestSeedKey.Int(val) -} - -// GenAIRequestStopSequences returns an attribute KeyValue conforming to the -// "gen_ai.request.stop_sequences" semantic conventions. It represents the list -// of sequences that the model will use to stop generating further tokens. -func GenAIRequestStopSequences(val ...string) attribute.KeyValue { - return GenAIRequestStopSequencesKey.StringSlice(val) -} - -// GenAIRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the GenAI request. -func GenAIRequestTemperature(val float64) attribute.KeyValue { - return GenAIRequestTemperatureKey.Float64(val) -} - -// GenAIRequestTopK returns an attribute KeyValue conforming to the -// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling -// setting for the GenAI request. -func GenAIRequestTopK(val float64) attribute.KeyValue { - return GenAIRequestTopKKey.Float64(val) -} - -// GenAIRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling -// setting for the GenAI request. -func GenAIRequestTopP(val float64) attribute.KeyValue { - return GenAIRequestTopPKey.Float64(val) -} - -// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the -// "gen_ai.response.finish_reasons" semantic conventions. It represents the array -// of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAIResponseFinishReasonsKey.StringSlice(val) -} - -// GenAIResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique identifier -// for the completion. 
-func GenAIResponseID(val string) attribute.KeyValue { - return GenAIResponseIDKey.String(val) -} - -// GenAIResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// model that generated the response. -func GenAIResponseModel(val string) attribute.KeyValue { - return GenAIResponseModelKey.String(val) -} - -// GenAIUsageInputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI input (prompt). -func GenAIUsageInputTokens(val int) attribute.KeyValue { - return GenAIUsageInputTokensKey.Int(val) -} - -// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI response (completion). -func GenAIUsageOutputTokens(val int) attribute.KeyValue { - return GenAIUsageOutputTokensKey.Int(val) -} - -// Enum values for gen_ai.openai.request.response_format -var ( - // Text response format - // Stability: development - GenAIOpenaiRequestResponseFormatText = GenAIOpenaiRequestResponseFormatKey.String("text") - // JSON object response format - // Stability: development - GenAIOpenaiRequestResponseFormatJSONObject = GenAIOpenaiRequestResponseFormatKey.String("json_object") - // JSON schema response format - // Stability: development - GenAIOpenaiRequestResponseFormatJSONSchema = GenAIOpenaiRequestResponseFormatKey.String("json_schema") -) - -// Enum values for gen_ai.openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - GenAIOpenaiRequestServiceTierAuto = GenAIOpenaiRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. 
- // Stability: development - GenAIOpenaiRequestServiceTierDefault = GenAIOpenaiRequestServiceTierKey.String("default") -) - -// Enum values for gen_ai.operation.name -var ( - // Chat completion operation such as [OpenAI Chat API] - // Stability: development - // - // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat - GenAIOperationNameChat = GenAIOperationNameKey.String("chat") - // Text completions operation such as [OpenAI Completions API (Legacy)] - // Stability: development - // - // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions - GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") - // Embeddings operation such as [OpenAI Create embeddings API] - // Stability: development - // - // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create - GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") -) - -// Enum values for gen_ai.system -var ( - // OpenAI - // Stability: development - GenAISystemOpenai = GenAISystemKey.String("openai") - // Vertex AI - // Stability: development - GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") - // Gemini - // Stability: development - GenAISystemGemini = GenAISystemKey.String("gemini") - // Anthropic - // Stability: development - GenAISystemAnthropic = GenAISystemKey.String("anthropic") - // Cohere - // Stability: development - GenAISystemCohere = GenAISystemKey.String("cohere") - // Azure AI Inference - // Stability: development - GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") - // Azure OpenAI - // Stability: development - GenAISystemAzAIOpenai = GenAISystemKey.String("az.ai.openai") - // IBM Watsonx AI - // Stability: development - GenAISystemIbmWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") - // AWS Bedrock - // Stability: development - GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") - // Perplexity - // Stability: development - GenAISystemPerplexity = GenAISystemKey.String("perplexity") - // xAI - // Stability: development - GenAISystemXai = GenAISystemKey.String("xai") - // DeepSeek - // Stability: development - GenAISystemDeepseek = GenAISystemKey.String("deepseek") - // Groq - // Stability: development - GenAISystemGroq = GenAISystemKey.String("groq") - // Mistral AI - // Stability: development - GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") -) - -// Enum values for gen_ai.token.type -var ( - // Input tokens (prompt, input, etc.) - // Stability: development - GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Output tokens (completion, response, etc.) - // Stability: development - GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") -) - -// Namespace: geo -const ( - // GeoContinentCodeKey is the attribute Key conforming to the - // "geo.continent.code" semantic conventions. It represents the two-letter code - // representing continent’s name. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - GeoContinentCodeKey = attribute.Key("geo.continent.code") - - // GeoCountryIsoCodeKey is the attribute Key conforming to the - // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO - // Country Code ([ISO 3166-1 alpha2]). 
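// A sketch of annotating a chat-completion span with the gen_ai.* helpers and
// enum values above; the function name and the literal values are illustrative,
// with imports as in the earlier sketches.
//
//	func recordChatCall(ctx context.Context) {
//		_, span := otel.Tracer("genai").Start(ctx, "chat gpt-4")
//		defer span.End()
//
//		span.SetAttributes(
//			semconv.GenAIOperationNameChat,       // enum value for gen_ai.operation.name
//			semconv.GenAISystemOpenai,            // enum value for gen_ai.system
//			semconv.GenAIRequestModel("gpt-4"),   // gen_ai.request.model
//			semconv.GenAIRequestTemperature(0.0), // gen_ai.request.temperature
//			semconv.GenAIUsageInputTokens(100),   // token counts from the provider's usage report
//			semconv.GenAIUsageOutputTokens(180),
//		)
//	}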
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA" - // - // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes - GeoCountryIsoCodeKey = attribute.Key("geo.country.iso_code") - - // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" - // semantic conventions. It represents the locality name. Represents the name of - // a city, town, village, or similar populated place. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Montreal", "Berlin" - GeoLocalityNameKey = attribute.Key("geo.locality.name") - - // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" - // semantic conventions. It represents the latitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 45.505918 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLatKey = attribute.Key("geo.location.lat") - - // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" - // semantic conventions. It represents the longitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -73.61483 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLonKey = attribute.Key("geo.location.lon") - - // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" - // semantic conventions. It represents the postal code associated with the - // location. Values appropriate for this field may also be known as a postcode - // or ZIP code and will vary widely from country to country. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "94040" - GeoPostalCodeKey = attribute.Key("geo.postal_code") - - // GeoRegionIsoCodeKey is the attribute Key conforming to the - // "geo.region.iso_code" semantic conventions. It represents the region ISO code - // ([ISO 3166-2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA-QC" - // - // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 - GeoRegionIsoCodeKey = attribute.Key("geo.region.iso_code") -) - -// GeoCountryIsoCode returns an attribute KeyValue conforming to the -// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO -// Country Code ([ISO 3166-1 alpha2]). -// -// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes -func GeoCountryIsoCode(val string) attribute.KeyValue { - return GeoCountryIsoCodeKey.String(val) -} - -// GeoLocalityName returns an attribute KeyValue conforming to the -// "geo.locality.name" semantic conventions. It represents the locality name. -// Represents the name of a city, town, village, or similar populated place. -func GeoLocalityName(val string) attribute.KeyValue { - return GeoLocalityNameKey.String(val) -} - -// GeoLocationLat returns an attribute KeyValue conforming to the -// "geo.location.lat" semantic conventions. It represents the latitude of the geo -// location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLat(val float64) attribute.KeyValue { - return GeoLocationLatKey.Float64(val) -} - -// GeoLocationLon returns an attribute KeyValue conforming to the -// "geo.location.lon" semantic conventions. 
It represents the longitude of the -// geo location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLon(val float64) attribute.KeyValue { - return GeoLocationLonKey.Float64(val) -} - -// GeoPostalCode returns an attribute KeyValue conforming to the -// "geo.postal_code" semantic conventions. It represents the postal code -// associated with the location. Values appropriate for this field may also be -// known as a postcode or ZIP code and will vary widely from country to country. -func GeoPostalCode(val string) attribute.KeyValue { - return GeoPostalCodeKey.String(val) -} - -// GeoRegionIsoCode returns an attribute KeyValue conforming to the -// "geo.region.iso_code" semantic conventions. It represents the region ISO code -// ([ISO 3166-2]). -// -// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 -func GeoRegionIsoCode(val string) attribute.KeyValue { - return GeoRegionIsoCodeKey.String(val) -} - -// Enum values for geo.continent.code -var ( - // Africa - // Stability: development - GeoContinentCodeAf = GeoContinentCodeKey.String("AF") - // Antarctica - // Stability: development - GeoContinentCodeAn = GeoContinentCodeKey.String("AN") - // Asia - // Stability: development - GeoContinentCodeAs = GeoContinentCodeKey.String("AS") - // Europe - // Stability: development - GeoContinentCodeEu = GeoContinentCodeKey.String("EU") - // North America - // Stability: development - GeoContinentCodeNa = GeoContinentCodeKey.String("NA") - // Oceania - // Stability: development - GeoContinentCodeOc = GeoContinentCodeKey.String("OC") - // South America - // Stability: development - GeoContinentCodeSa = GeoContinentCodeKey.String("SA") -) - -// Namespace: go -const ( - // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" - // semantic conventions. It represents the type of memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "other", "stack" - GoMemoryTypeKey = attribute.Key("go.memory.type") -) - -// Enum values for go.memory.type -var ( - // Memory allocated from the heap that is reserved for stack space, whether or - // not it is currently in-use. - // Stability: development - GoMemoryTypeStack = GoMemoryTypeKey.String("stack") - // Memory used by the Go runtime, excluding other categories of memory usage - // described in this enumeration. - // Stability: development - GoMemoryTypeOther = GoMemoryTypeKey.String("other") -) - -// Namespace: graphql -const ( - // GraphqlDocumentKey is the attribute Key conforming to the "graphql.document" - // semantic conventions. It represents the GraphQL document being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: query findBookById { bookById(id: ?) { name } } - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of the - // operation being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: findBookById - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of the - // operation being executed. 
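A short sketch of how the geo.* keys combine for a single location; the coordinates and codes are the example values quoted in the comments, and the helper itself is hypothetical.

package example

import "go.opentelemetry.io/otel/attribute"

// geoAttributes applies the geo.* keys to one location: WGS84 lat/lon are
// doubles, the remaining keys are strings.
func geoAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("geo.country.iso_code", "CA"),
		attribute.String("geo.locality.name", "Montreal"),
		attribute.Float64("geo.location.lat", 45.505918),
		attribute.Float64("geo.location.lon", -73.61483),
		attribute.String("geo.continent.code", "NA"),
	}
}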
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "query", "mutation", "subscription" - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} - -// Enum values for graphql.operation.type -var ( - // GraphQL query - // Stability: development - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - // Stability: development - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - // Stability: development - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// Namespace: heroku -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit hash - // for the current release. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents the - // time and date the release was created. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2022-10-23T18:00:42Z" - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" -// semantic conventions. It represents the unique identifier for the application. -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release. -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the -// "heroku.release.creation_timestamp" semantic conventions. It represents the -// time and date the release was created. -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// Namespace: host -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is running - // on. 
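For illustration, the graphql.* attributes for the sample query from the comments could be assembled as below (package and helper name are hypothetical; a real instrumentation may sanitize the document value).

package example

import "go.opentelemetry.io/otel/attribute"

// graphqlAttributes pairs the graphql.* keys for one sample query execution.
func graphqlAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("graphql.operation.type", "query"),
		attribute.String("graphql.operation.name", "findBookById"),
		attribute.String("graphql.document", "query findBookById { bookById(id: ?) { name } }"),
	}
}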
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of - // level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" - // semantic conventions. It represents the family or generation of the CPU. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "PA-RISC 1.1e" - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" - // semantic conventions. It represents the model identifier. It provides more - // granular information about the CPU, distinguishing it from other CPUs within - // the same family. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "9000/778/B180L" - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" - // semantic conventions. It represents the stepping or core revisions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "r1p1" - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "GenuineIntel" - // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX - // registers. Writing these to memory in this order results in a 12-character - // string. - // - // [CPUID]: https://wiki.osdev.org/CPUID - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be the - // instance_id assigned by the cloud provider. For non-containerized systems, - // this should be the `machine-id`. See the table below for the sources to use - // to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fdbf79e8af94cb7f9e8df36789187052" - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. For - // Cloud, this value is from the provider. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ami-07b06b442921831e5" - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the "host.image.name" - // semantic conventions. It represents the name of the VM image or OS install - // the host was instantiated from. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version string - // of the VM image or host OS as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC 5952] format. - // - // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as - // hyphen-separated octets in uppercase hexadecimal form from most to least - // significant. - // - // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified hostname, - // or another name specified by the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-test" - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "n1-standard-1" - HostTypeKey = attribute.Key("host.type") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. 
It represents the family or generation -// of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model identifier. -// It provides more granular information about the CPU, distinguishing it from -// other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use to -// determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the "host.image.id" -// semantic conventions. It represents the vM image ID or host OS image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM image -// or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string of -// the VM image or host OS as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic -// conventions. It represents the available MAC addresses of the host, excluding -// loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" semantic -// conventions. It represents the name of the host. 
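A rough sketch of host.* resource attributes, reusing the example values quoted above; the helper is illustrative only.

package example

import "go.opentelemetry.io/otel/attribute"

// hostResourceAttributes sketches typical host.* resource attributes.
func hostResourceAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("host.name", "opentelemetry-test"),
		attribute.String("host.id", "fdbf79e8af94cb7f9e8df36789187052"),
		attribute.String("host.arch", "amd64"), // HostArchAMD64
		attribute.StringSlice("host.ip", []string{"192.168.1.140", "fe80::abc2:4a28:737a:609e"}),
	}
}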
On Unix systems, it may -// contain what the hostname command returns, or the fully qualified hostname, or -// another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" semantic -// conventions. It represents the type of host. For Cloud, this must be the -// machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Enum values for host.arch -var ( - // AMD64 - // Stability: development - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - // Stability: development - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - // Stability: development - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - // Stability: development - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - // Stability: development - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - // Stability: development - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - // Stability: development - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - // Stability: development - HostArchX86 = HostArchKey.String("x86") -) - -// Namespace: http -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of the - // HTTP connection in the HTTP connection pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "idle" - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of the - // request payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP request - // method. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GET", "POST", "HEAD" - // Note: HTTP request method value SHOULD be "known" to the instrumentation. - // By default, this convention defines "known" methods as the ones listed in - // [RFC9110] - // and the PATCH method defined in [RFC5789]. - // - // If the HTTP request method is not known to instrumentation, it MUST set the - // `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is not a - // list of known methods in addition to the defaults). 
- // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods to be - // case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - // - // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods - // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GeT", "ACL", "foo" - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including redirects). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending (e.g. - // redirection, authorization failure, 503 Server Unavailable, network issues, - // or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" - // semantic conventions. It represents the total size of the request in bytes. - // This should be the total number of bytes sent over the wire, including the - // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request - // body if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size of the - // response payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size of - // the response in bytes. This should be the total number of bytes sent over the - // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), - // headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status code]. 
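The `_OTHER` rule above is easiest to see in code. The sketch below assumes the default known-method set (RFC 9110 plus PATCH) and a hypothetical helper; a real instrumentation would also honor the OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS override described in the note.

package example

import "go.opentelemetry.io/otel/attribute"

// knownMethods is the default case-sensitive set: the RFC 9110 methods plus PATCH.
var knownMethods = map[string]bool{
	"CONNECT": true, "DELETE": true, "GET": true, "HEAD": true, "OPTIONS": true,
	"PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// httpMethodAttributes records an unknown method as "_OTHER" and keeps the
// original value in http.request.method_original, as described above.
func httpMethodAttributes(method string) []attribute.KeyValue {
	if knownMethods[method] {
		return []attribute.KeyValue{attribute.String("http.request.method", method)}
	}
	return []attribute.KeyValue{
		attribute.String("http.request.method", "_OTHER"),
		attribute.String("http.request.method_original", method),
	}
}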
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 200 - // - // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic - // conventions. It represents the matched route, that is, the path template in - // the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/users/:userID?", "{controller}/{action}/{id?}" - // Note: MUST NOT be populated when this is not supported by the HTTP server - // framework as the route attribute should have low-cardinality and the URI path - // can NOT substitute it. - // SHOULD include the [application root] if there is one. - // - // [application root]: /docs/http/http-spans.md#http-server-definitions - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of the -// request in bytes. This should be the total number of bytes sent over the wire, -// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, -// and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of the -// response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. It represents the total size of the -// response in bytes. 
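Putting the stable HTTP attributes together, a hypothetical server-side span might look like this minimal sketch (tracer name, span name, and values are placeholders).

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// recordServerSpan sketches an HTTP server span carrying the stable
// http.request.method, http.route, and http.response.status_code attributes.
func recordServerSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "GET /users/:userID?")
	defer span.End()

	span.SetAttributes(
		attribute.String("http.request.method", "GET"),
		attribute.String("http.route", "/users/:userID?"),
		attribute.Int("http.response.status_code", 200),
	)
}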
This should be the total number of bytes sent over the -// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the -// [HTTP response status code]. -// -// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Enum values for http.connection.state -var ( - // active state. - // Stability: development - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state. - // Stability: development - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -// Enum values for http.request.method -var ( - // CONNECT method. - // Stability: stable - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method. - // Stability: stable - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method. - // Stability: stable - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method. - // Stability: stable - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method. - // Stability: stable - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method. - // Stability: stable - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method. - // Stability: stable - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method. - // Stability: stable - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method. - // Stability: stable - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of. - // Stability: stable - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// Namespace: hw -const ( - // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. - // It represents an identifier for the hardware component, unique within the - // monitored host. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "win32battery_battery_testsysa33_1" - HwIDKey = attribute.Key("hw.id") - - // HwNameKey is the attribute Key conforming to the "hw.name" semantic - // conventions. It represents an easily-recognizable name for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "eth0" - HwNameKey = attribute.Key("hw.name") - - // HwParentKey is the attribute Key conforming to the "hw.parent" semantic - // conventions. It represents the unique identifier of the parent component - // (typically the `hw.id` attribute of the enclosure, or disk controller). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dellStorage_perc_0" - HwParentKey = attribute.Key("hw.parent") - - // HwStateKey is the attribute Key conforming to the "hw.state" semantic - // conventions. It represents the current state of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwStateKey = attribute.Key("hw.state") - - // HwTypeKey is the attribute Key conforming to the "hw.type" semantic - // conventions. It represents the type of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: Describes the category of the hardware component for which `hw.state` - // is being reported. For example, `hw.type=temperature` along with - // `hw.state=degraded` would indicate that the temperature of the hardware - // component has been reported as `degraded`. - HwTypeKey = attribute.Key("hw.type") -) - -// HwID returns an attribute KeyValue conforming to the "hw.id" semantic -// conventions. It represents an identifier for the hardware component, unique -// within the monitored host. -func HwID(val string) attribute.KeyValue { - return HwIDKey.String(val) -} - -// HwName returns an attribute KeyValue conforming to the "hw.name" semantic -// conventions. It represents an easily-recognizable name for the hardware -// component. -func HwName(val string) attribute.KeyValue { - return HwNameKey.String(val) -} - -// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic -// conventions. It represents the unique identifier of the parent component -// (typically the `hw.id` attribute of the enclosure, or disk controller). -func HwParent(val string) attribute.KeyValue { - return HwParentKey.String(val) -} - -// Enum values for hw.state -var ( - // Ok - // Stability: development - HwStateOk = HwStateKey.String("ok") - // Degraded - // Stability: development - HwStateDegraded = HwStateKey.String("degraded") - // Failed - // Stability: development - HwStateFailed = HwStateKey.String("failed") -) - -// Enum values for hw.type -var ( - // Battery - // Stability: development - HwTypeBattery = HwTypeKey.String("battery") - // CPU - // Stability: development - HwTypeCPU = HwTypeKey.String("cpu") - // Disk controller - // Stability: development - HwTypeDiskController = HwTypeKey.String("disk_controller") - // Enclosure - // Stability: development - HwTypeEnclosure = HwTypeKey.String("enclosure") - // Fan - // Stability: development - HwTypeFan = HwTypeKey.String("fan") - // GPU - // Stability: development - HwTypeGpu = HwTypeKey.String("gpu") - // Logical disk - // Stability: development - HwTypeLogicalDisk = HwTypeKey.String("logical_disk") - // Memory - // Stability: development - HwTypeMemory = HwTypeKey.String("memory") - // Network - // Stability: development - HwTypeNetwork = HwTypeKey.String("network") - // Physical disk - // Stability: development - HwTypePhysicalDisk = HwTypeKey.String("physical_disk") - // Power supply - // Stability: development - HwTypePowerSupply = HwTypeKey.String("power_supply") - // Tape drive - // Stability: development - HwTypeTapeDrive = HwTypeKey.String("tape_drive") - // Temperature - // Stability: development - HwTypeTemperature = HwTypeKey.String("temperature") - // Voltage - // Stability: development - HwTypeVoltage = HwTypeKey.String("voltage") -) - -// Namespace: k8s -const ( - // K8SClusterNameKey is the attribute Key conforming to the 
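As a sketch, the hw.* identification attributes pair with the state and type enums above; following the note, hw.type=temperature with hw.state=degraded describes a degraded temperature sensor. The id and name below are placeholders.

package example

import "go.opentelemetry.io/otel/attribute"

// hwAttributes combines component identification with the state/type enums.
func hwAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("hw.id", "cpu0_temp"),       // placeholder component id
		attribute.String("hw.name", "CPU0 temperature"), // placeholder display name
		attribute.String("hw.type", "temperature"),   // HwTypeTemperature
		attribute.String("hw.state", "degraded"),     // HwStateDegraded
	}
}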
"k8s.cluster.name" - // semantic conventions. It represents the name of the cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-cluster" - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" - // semantic conventions. It represents a pseudo-ID for the cluster, set to the - // UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8s cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8s ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T X.667]. - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // > different from all other UUIDs generated before 3603 A.D., or is - // > extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - // - // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "redis" - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the number - // of times the container was restarted. This attribute can be used to identify - // a particular container (running or stopped) within a container spec. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to - // the "k8s.container.status.last_terminated_reason" semantic conventions. It - // represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Evicted", "Error" - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" - // semantic conventions. It represents the name of the CronJob. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" - // semantic conventions. It represents the UID of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" - // semantic conventions. It represents the UID of the DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic - // conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic - // conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "default" - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNamespacePhaseKey is the attribute Key conforming to the - // "k8s.namespace.phase" semantic conventions. It represents the phase of the - // K8s namespace. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "terminating" - // Note: This attribute aligns with the `phase` field of the - // [K8s NamespaceStatus] - // - // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core - K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "node-1" - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic - // conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic - // conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-pod-autoconf" - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic - // conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" - // semantic conventions. It represents the name of the K8s volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "volume0" - K8SVolumeNameKey = attribute.Key("k8s.volume.name") - - // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" - // semantic conventions. 
It represents the type of the K8s volume. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "emptyDir", "persistentVolumeClaim" - K8SVolumeTypeKey = attribute.Key("k8s.volume.type") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify a -// particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. 
-func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" -// semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SVolumeName returns an attribute KeyValue conforming to the -// "k8s.volume.name" semantic conventions. It represents the name of the K8s -// volume. 
-func K8SVolumeName(val string) attribute.KeyValue { - return K8SVolumeNameKey.String(val) -} - -// Enum values for k8s.namespace.phase -var ( - // Active namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") - // Terminating namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") -) - -// Enum values for k8s.volume.type -var ( - // A [persistentVolumeClaim] volume - // Stability: development - // - // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim - K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") - // A [configMap] volume - // Stability: development - // - // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap - K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") - // A [downwardAPI] volume - // Stability: development - // - // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi - K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") - // An [emptyDir] volume - // Stability: development - // - // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir - K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") - // A [secret] volume - // Stability: development - // - // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret - K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") - // A [local] volume - // Stability: development - // - // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local - K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") -) - -// Namespace: linux -const ( - // LinuxMemorySlabStateKey is the attribute Key conforming to the - // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab - // memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "reclaimable", "unreclaimable" - LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") -) - -// Enum values for linux.memory.slab.state -var ( - // reclaimable - // Stability: development - LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") - // unreclaimable - // Stability: development - LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") -) - -// Namespace: log -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "audit.log" - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the basename of - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "uuid.log" - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. 
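A minimal sketch of k8s.* resource attributes for a pod, reusing the example values quoted in the comments above; the helper is illustrative.

package example

import "go.opentelemetry.io/otel/attribute"

// k8sPodAttributes sketches the resource attributes for one pod and container.
func k8sPodAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("k8s.cluster.name", "opentelemetry-cluster"),
		attribute.String("k8s.namespace.name", "default"),
		attribute.String("k8s.pod.name", "opentelemetry-pod-autoconf"),
		attribute.String("k8s.pod.uid", "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
		attribute.String("k8s.container.name", "redis"),
		attribute.Int("k8s.container.restart_count", 0),
	}
}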
It represents the full path to the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/log/mysql/audit.log" - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full path to - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/lib/docker/uuid.log" - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") - - // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic - // conventions. It represents the stream associated with the log. See below for - // a list of well-known values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - LogIostreamKey = attribute.Key("log.iostream") - - // LogRecordOriginalKey is the attribute Key conforming to the - // "log.record.original" semantic conventions. It represents the complete - // original Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - - // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" - // Note: This value MAY be added when processing a Log Record which was - // originally transmitted as a string or equivalent data type AND the Body field - // of the Log Record does not contain the same value. (e.g. a syslog or a log - // record read from a file.) - LogRecordOriginalKey = attribute.Key("log.record.original") - - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an - // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other - // identifiers (e.g. UUID) may be used as needed. - // - // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogFileName returns an attribute KeyValue conforming to the "log.file.name" -// semantic conventions. It represents the basename of the file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" -// semantic conventions. It represents the full path to the file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path to -// the file, with symlinks resolved. 
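Sketch of the log.file.* attributes for a file read through a symlink, using the example values above; the helper name is hypothetical.

package example

import "go.opentelemetry.io/otel/attribute"

// logFileAttributes pairs a log file's path and basename with their
// symlink-resolved counterparts.
func logFileAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.String("log.file.name", "audit.log"),
		attribute.String("log.file.path", "/var/log/mysql/audit.log"),
		attribute.String("log.file.name_resolved", "uuid.log"),
		attribute.String("log.file.path_resolved", "/var/lib/docker/uuid.log"),
		attribute.String("log.iostream", "stdout"),
	}
}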
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// LogRecordOriginal returns an attribute KeyValue conforming to the -// "log.record.original" semantic conventions. It represents the complete -// original Log Record. -func LogRecordOriginal(val string) attribute.KeyValue { - return LogRecordOriginalKey.String(val) -} - -// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" -// semantic conventions. It represents a unique identifier for the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Enum values for log.iostream -var ( - // Logs from stdout stream - // Stability: development - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - // Stability: development - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Namespace: messaging -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the batching - // operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client library - // supports both batch and single-message API for the same operation, - // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs - // and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique identifier - // for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "client-5", "myhost@8742@s8083jm" - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingConsumerGroupNameKey is the attribute Key conforming to the - // "messaging.consumer.group.name" semantic conventions. It represents the name - // of the consumer group with which a consumer is associated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-group", "indexer" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.consumer.group.name` is applicable and what it means in - // the context of that system. - MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the message - // destination name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MyQueue", "MyTopic" - // Note: Destination name SHOULD uniquely identify a specific queue, topic or - // other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD uniquely - // identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to the - // "messaging.destination.partition.id" semantic conventions. It represents the - // identifier of the partition messages are sent to or received from, unique - // within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to - // the "messaging.destination.subscription.name" semantic conventions. It - // represents the name of the destination subscription from which a message is - // consumed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "subscription-a" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.destination.subscription.name` is applicable and what it - // means in the context of that system. - MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the low - // cardinality representation of the messaging destination name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/customers/{customerId}" - // Note: Destination names could be constructed from templates. An example would - // be a destination name involving a user name or product id. Although the - // destination name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") - - // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. 
It - // represents the ack deadline in seconds set for the modify ack deadline - // request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the - // ack id for a given message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ack_id - MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. - // It represents the delivery attempt for a given message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It - // represents the ordering key for a given message. If the attribute is not - // present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ordering_key - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the message - // keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message.id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents a - // boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingKafkaOffsetKey is the attribute Key conforming to the - // "messaging.kafka.offset" semantic conventions. It represents the offset of a - // record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the size of - // the message body in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed body size. 
If - // both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents the - // conversation ID identifying the conversation to which the message belongs, - // represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: MyConversationId - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents the - // size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed size. If both - // sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used by - // the messaging system as an identifier for the message, represented as a - // string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 452a7c7c7c7048c2f887f61572b18fc2 - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ack", "nack", "send" - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key conforming to - // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It - // represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming to the - // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents - // the rabbitMQ message delivery tag. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to the - // "messaging.rocketmq.consumption_model" semantic conventions. It represents - // the model of message consumption. This only applies to consumer spans. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key conforming to - // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It - // represents the delay time level for delay message, which determines the - // message delay time. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key conforming - // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. - // It represents the timestamp in milliseconds that the delay message is - // expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents the it - // is essential for FIFO message. Messages that belong to the same message group - // are always processed one by one within the same consumer group. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myMessageGroup - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents the - // key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "keyA", "keyB" - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: tagA - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents the - // type of message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myNamespace - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingServicebusDispositionStatusKey is the attribute Key conforming to - // the "messaging.servicebus.disposition_status" semantic conventions. It - // represents the describes the [settlement type]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock - MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServicebusMessageDeliveryCountKey is the attribute Key conforming to - // the "messaging.servicebus.message.delivery_count" semantic conventions. It - // represents the number of deliveries that have been attempted for this - // message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.servicebus.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") - - // MessagingSystemKey is the attribute Key conforming to the "messaging.system" - // semantic conventions. It represents the messaging system as identified by the - // client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate with - // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the - // instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to the -// "messaging.batch.message_count" semantic conventions. It represents the number -// of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique identifier -// for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingConsumerGroupName returns an attribute KeyValue conforming to the -// "messaging.consumer.group.name" semantic conventions. It represents the name -// of the consumer group with which a consumer is associated. -func MessagingConsumerGroupName(val string) attribute.KeyValue { - return MessagingConsumerGroupNameKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the -// "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be unnamed -// or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. 
-func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming to -// the "messaging.destination.partition.id" semantic conventions. It represents -// the identifier of the partition messages are sent to or received from, unique -// within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming -// to the "messaging.destination.subscription.name" semantic conventions. It -// represents the name of the destination subscription from which a message is -// consumed. -func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingDestinationSubscriptionNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to the -// "messaging.destination.template" semantic conventions. It represents the low -// cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to the -// "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue conforming -// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It -// represents the UTC epoch seconds at which the message has been accepted and -// stored in the entity. -func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) -} - -// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It -// represents the ack deadline in seconds set for the modify ack deadline -// request. -func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming to the -// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the -// ack id for a given message. -func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageAckIDKey.String(val) -} - -// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It -// represents the ordering key for a given message. If the attribute is not -// present, the message does not have an ordering key. 
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the message -// keys in Kafka are used for grouping alike messages to ensure they're processed -// on the same partition. They differ from `messaging.message.id` in that they're -// not unique. If the key is `null`, the attribute MUST NOT be set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the -// "messaging.kafka.message.tombstone" semantic conventions. It represents a -// boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingKafkaOffset returns an attribute KeyValue conforming to the -// "messaging.kafka.offset" semantic conventions. It represents the offset of a -// record in the corresponding Kafka partition. -func MessagingKafkaOffset(val int) attribute.KeyValue { - return MessagingKafkaOffsetKey.Int(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size of -// the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming to the -// "messaging.message.conversation_id" semantic conventions. It represents the -// conversation ID identifying the conversation to which the message belongs, -// represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the -// "messaging.message.envelope.size" semantic conventions. It represents the size -// of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by the -// messaging system as an identifier for the message, represented as a string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue conforming -// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. 
It -// represents the rabbitMQ message delivery tag. -func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitmqMessageDeliveryTagKey.Int(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.group" semantic conventions. It represents the it -// is essential for FIFO message. Messages that belong to the same message group -// are always processed one by one within the same consumer group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.keys" semantic conventions. It represents the -// key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to the -// "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. -func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServicebusMessageDeliveryCountKey.Int(val) -} - -// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has been -// accepted and stored in the entity. -func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServicebusMessageEnqueuedTimeKey.Int(val) -} - -// Enum values for messaging.operation.type -var ( - // A message is created. 
"Create" spans always refer to a single message and are - // used to provide a unique creation context for messages in batch sending - // scenarios. - // - // Stability: development - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are provided for sending to an intermediary. If a single - // message is sent, the context of the "Send" span can be used as the creation - // context and no "Create" span needs to be created. - // - // Stability: development - MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") - // One or more messages are requested by a consumer. This operation refers to - // pull-based scenarios, where consumers explicitly call methods of messaging - // SDKs to receive messages. - // - // Stability: development - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are processed by a consumer. - // - // Stability: development - MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") - // One or more messages are settled. - // - // Stability: development - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") - // Deprecated: Replaced by `process`. - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver") - // Deprecated: Replaced by `send`. - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") -) - -// Enum values for messaging.rocketmq.consumption_model -var ( - // Clustering consumption model - // Stability: development - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - // Stability: development - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// Enum values for messaging.rocketmq.message.type -var ( - // Normal message - // Stability: development - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - // Stability: development - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - // Stability: development - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - // Stability: development - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -// Enum values for messaging.servicebus.disposition_status -var ( - // Message is completed - // Stability: development - MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") - // Message is abandoned - // Stability: development - MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - // Stability: development - MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") - // Message is deferred - // Stability: development - MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") -) - -// Enum values for messaging.system -var ( - // Apache ActiveMQ - // Stability: development - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - // Stability: development - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - // Stability: development - 
MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - // Stability: development - MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - // Stability: development - MessagingSystemServicebus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - // Stability: development - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - // Stability: development - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - // Stability: development - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - // Stability: development - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - // Stability: development - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") - // Apache Pulsar - // Stability: development - MessagingSystemPulsar = MessagingSystemKey.String("pulsar") -) - -// Namespace: network -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier network. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: DE - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile carrier - // country code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 310 - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile carrier - // network code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 001 - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of the - // mobile carrier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: sprint - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionStateKey is the attribute Key conforming to the - // "network.connection.state" semantic conventions. It represents the state of - // network connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "close_wait" - // Note: Connection states are defined as part of the [rfc9293] - // - // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 - NetworkConnectionStateKey = attribute.Key("network.connection.state") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the this - // describes more details regarding the connection.type. It may be the type of - // cell technology connection, but it could be used for describing details about - // a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: LTE - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the internet - // connection type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: wifi - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkInterfaceNameKey is the attribute Key conforming to the - // "network.interface.name" semantic conventions. It represents the network - // interface name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lo", "eth0" - NetworkInterfaceNameKey = attribute.Key("network.interface.name") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "transmit" - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" - // semantic conventions. It represents the peer port number of the network - // connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the - // [OSI application layer] or non-OSI equivalent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "amqp", "http", "mqtt" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI application layer]: https://wikipedia.org/wiki/Application_layer - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the actual - // version of the protocol used for network communication. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.1", "2" - // Note: If protocol version is subject to negotiation (for example using [ALPN] - // ), this attribute SHOULD be set to the negotiated version. If the actual - // protocol version is not known, this attribute SHOULD NOT be set. - // - // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the - // [OSI transport layer] or [inter-process communication method]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "tcp", "udp" - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port 12345. - // - // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer - // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic - // conventions. It represents the [OSI network layer] or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "ipv4", "ipv6" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI network layer]: https://wikipedia.org/wiki/Network_layer - NetworkTypeKey = attribute.Key("network.type") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkInterfaceName returns an attribute KeyValue conforming to the -// "network.interface.name" semantic conventions. It represents the network -// interface name. -func NetworkInterfaceName(val string) attribute.KeyValue { - return NetworkInterfaceNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local address -// of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port number -// of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address of -// the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the -// [OSI application layer] or non-OSI equivalent. -// -// [OSI application layer]: https://wikipedia.org/wiki/Application_layer -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Enum values for network.connection.state -var ( - // closed - // Stability: development - NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") - // close_wait - // Stability: development - NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") - // closing - // Stability: development - NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") - // established - // Stability: development - NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") - // fin_wait_1 - // Stability: development - NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") - // fin_wait_2 - // Stability: development - NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") - // last_ack - // Stability: development - NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") - // listen - // Stability: development - NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") - // syn_received - // Stability: development - NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") - // syn_sent - // Stability: development - NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") - // time_wait - // Stability: development - NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") -) - -// Enum values for network.connection.subtype -var ( - // GPRS - // Stability: development - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - // Stability: development - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - // Stability: development - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - // Stability: development - 
NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - // Stability: development - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - // Stability: development - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - // Stability: development - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - // Stability: development - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - // Stability: development - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - // Stability: development - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - // Stability: development - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. B - // Stability: development - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - // Stability: development - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - // Stability: development - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - // Stability: development - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - // Stability: development - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - // Stability: development - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - // Stability: development - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - // Stability: development - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - // Stability: development - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - // Stability: development - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -// Enum values for network.connection.type -var ( - // wifi - // Stability: development - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - // Stability: development - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - // Stability: development - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - // Stability: development - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - // Stability: development - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -// Enum values for network.io.direction -var ( - // transmit - // Stability: development - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - // Stability: development - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -// Enum values for network.transport -var ( - // TCP - // Stability: stable - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - // Stability: stable - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe. 
- // Stability: stable - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - // Stability: stable - NetworkTransportUnix = NetworkTransportKey.String("unix") - // QUIC - // Stability: development - NetworkTransportQUIC = NetworkTransportKey.String("quic") -) - -// Enum values for network.type -var ( - // IPv4 - // Stability: stable - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - // Stability: stable - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// Namespace: oci -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of the - // OCI image manifest. For container images specifically is the digest by which - // the container image is known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" - // Note: Follows [OCI Image Manifest Specification], and specifically the - // [Digest property]. - // An example can be found in [Example Image Manifest]. - // - // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md - // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests - // [Example Image Manifest]: https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// Namespace: opentracing -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the parent-child - // Reference type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -// Enum values for opentracing.ref_type -var ( - // The parent Span depends on the child Span in some capacity - // Stability: development - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - // Stability: development - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Namespace: os -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TQ3C.230805.001.B2", "20E247", "22621" - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to be - // parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" semantic - // conventions. It represents the version string of the operating system as - // defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - OSVersionKey = attribute.Key("os.version") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the "os.description" -// semantic conventions. It represents the human readable (not intended to be -// parsed) OS version information, like e.g. reported by `ver` or -// `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating system -// as defined in [Version Attributes]. 
-// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Enum values for os.type -var ( - // Microsoft Windows - // Stability: development - OSTypeWindows = OSTypeKey.String("windows") - // Linux - // Stability: development - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - // Stability: development - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - // Stability: development - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - // Stability: development - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - // Stability: development - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - // Stability: development - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - // Stability: development - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - // Stability: development - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - // Stability: development - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - // Stability: development - OSTypeZOS = OSTypeKey.String("z_os") -) - -// Namespace: otel -const ( - // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" - // semantic conventions. It represents the name of the instrumentation scope - ( - // `InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "io.opentelemetry.contrib.mongodb" - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of the - // instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.0.0" - OTelScopeVersionKey = attribute.Key("otel.scope.version") - - // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" - // semantic conventions. It represents the name of the code, either "OK" or - // "ERROR". MUST NOT be set if the status code is UNSET. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the description - // of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "resource not found" - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). 
-func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the description -// of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Enum values for otel.status_code -var ( - // The operation has been validated by an Application developer or Operator to - // have completed successfully. - // Stability: stable - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error. - // Stability: stable - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// Namespace: peer -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic - // conventions. It represents the [`service.name`] of the remote service. SHOULD - // be equal to the actual `service.name` resource attribute of the remote - // service if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: AuthTokenCache - // - // [`service.name`]: /docs/resource/README.md#service - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the "peer.service" -// semantic conventions. It represents the [`service.name`] of the remote -// service. SHOULD be equal to the actual `service.name` resource attribute of -// the remote service if any. -// -// [`service.name`]: /docs/resource/README.md#service -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// Namespace: process -const ( - // ProcessArgsCountKey is the attribute Key conforming to the - // "process.args_count" semantic conventions. It represents the length of the - // process.command_args array. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 4 - // Note: This field can be useful for querying or performing bucket analysis on - // how many arguments were provided to start a process. More arguments may be an - // indication of suspicious activity. - ProcessArgsCountKey = attribute.Key("process.args_count") - - // ProcessCommandKey is the attribute Key conforming to the "process.command" - // semantic conventions. It represents the command used to launch the process - // (i.e. the command name). On Linux based systems, can be set to the zeroth - // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter - // extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otelcol" - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received by - // the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this - // would be the full argv vector passed to `main`. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otecol", "--config=config.yaml" - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full command - // used to launch the process as a single string representing the full command. - // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if - // you have to assemble it just for monitoring; use `process.command_args` - // instead. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It represents the - // specifies whether the context switches for this data point were voluntary or - // involuntary. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and time - // the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:25:34.853Z" - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableBuildIDGnuKey is the attribute Key conforming to the - // "process.executable.build_id.gnu" semantic conventions. It represents the GNU - // build ID as found in the `.note.gnu.build-id` ELF section (hex string). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" - ProcessExecutableBuildIDGnuKey = attribute.Key("process.executable.build_id.gnu") - - // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the - // "process.executable.build_id.go" semantic conventions. It represents the Go - // build ID as retrieved by `go tool buildid `. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" - ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") - - // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the - // "process.executable.build_id.htlhash" semantic conventions. It represents the - // profiling specific build ID for executables. See the OTel specification for - // Profiles for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "600DCAFE4A110000F2BF38C493F5FB92" - ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name of the - // process executable. On Linux based systems, can be set to the `Name` in - // `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcol" - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full path - // to the process executable. On Linux based systems, can be set to the target - // of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/cmd/otelcol" - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" - // semantic conventions. It represents the exit code of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" - // semantic conventions. It represents the date and time the process exited, in - // ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:26:12.315Z" - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID of the - // process's group leader. This is also the process group ID (PGID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents the whether the - // process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessLinuxCgroupKey is the attribute Key conforming to the - // "process.linux.cgroup" semantic conventions. It represents the control group - // associated with the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", - // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" - // Note: Control groups (cgroups) are a kernel feature used to organize and - // manage process resources. This attribute provides the path(s) to the - // cgroup(s) associated with the process, which should match the contents of the - // [/proc/[PID]/cgroup] file. - // - // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html - ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions. 
It represents the type of - // page fault for this data point. Type `major` is for major/hard page faults, - // and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent Process - // identifier (PPID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic - // conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user ID - // (RUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the username of - // the real user of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of the - // runtime of this process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OpenJDK Runtime Environment" - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the version of - // the runtime of this process, as returned by the runtime without modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14.0.2 - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved user ID - // (SUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the username of - // the saved user. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID of - // the process's session leader. This is also the session ID (SID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessTitleKey is the attribute Key conforming to the "process.title" - // semantic conventions. It represents the process title (proctitle). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cat /etc/hostname", "xfce4-session", "bash" - // Note: In many Unix-like systems, process title (proctitle), is the string - // that represents the name or command line of a running process, displayed by - // system monitoring tools like ps, top, and htop. - ProcessTitleKey = attribute.Key("process.title") - - // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" - // semantic conventions. It represents the effective user ID (EUID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" - // semantic conventions. It represents the username of the effective user of the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic - // conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily unique - // across all processes on the host but it is unique within the process - // namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") - - // ProcessWorkingDirectoryKey is the attribute Key conforming to the - // "process.working_directory" semantic conventions. It represents the working - // directory of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/root" - ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") -) - -// ProcessArgsCount returns an attribute KeyValue conforming to the -// "process.args_count" semantic conventions. It represents the length of the -// process.command_args array. -func ProcessArgsCount(val int) attribute.KeyValue { - return ProcessArgsCountKey.Int(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be set -// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the -// first parameter extracted from `GetCommandLineW`. 
-func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the command -// arguments (including the command/executable itself) as received by the -// process. On Linux-based systems (and some other Unixoid systems supporting -// procfs), can be set according to the list of null-delimited strings extracted -// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full -// argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if -// you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and time -// the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessExecutableBuildIDGnu returns an attribute KeyValue conforming to the -// "process.executable.build_id.gnu" semantic conventions. It represents the GNU -// build ID as found in the `.note.gnu.build-id` ELF section (hex string). -func ProcessExecutableBuildIDGnu(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGnuKey.String(val) -} - -// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the -// "process.executable.build_id.go" semantic conventions. It represents the Go -// build ID as retrieved by `go tool buildid `. -func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGoKey.String(val) -} - -// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to -// the "process.executable.build_id.htlhash" semantic conventions. It represents -// the profiling specific build ID for executables. See the OTel specification -// for Profiles for more information. -func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { - return ProcessExecutableBuildIDHtlhashKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of the -// process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path to -// the process executable. On Linux based systems, can be set to the target of -// `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. 
-func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time the -// process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of the -// process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessLinuxCgroup returns an attribute KeyValue conforming to the -// "process.linux.cgroup" semantic conventions. It represents the control group -// associated with the process. -func ProcessLinuxCgroup(val string) attribute.KeyValue { - return ProcessLinuxCgroupKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" -// semantic conventions. It represents the username of the user that owns the -// process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. -func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. 
-func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user ID -// (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username of -// the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessTitle returns an attribute KeyValue conforming to the "process.title" -// semantic conventions. It represents the process title (proctitle). -func ProcessTitle(val string) attribute.KeyValue { - return ProcessTitleKey.String(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" -// semantic conventions. It represents the virtual process identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// ProcessWorkingDirectory returns an attribute KeyValue conforming to the -// "process.working_directory" semantic conventions. It represents the working -// directory of the process. -func ProcessWorkingDirectory(val string) attribute.KeyValue { - return ProcessWorkingDirectoryKey.String(val) -} - -// Enum values for process.context_switch_type -var ( - // voluntary - // Stability: development - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - // Stability: development - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -// Enum values for process.paging.fault_type -var ( - // major - // Stability: development - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - // Stability: development - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// Namespace: profile -const ( - // ProfileFrameTypeKey is the attribute Key conforming to the - // "profile.frame.type" semantic conventions. It represents the describes the - // interpreter or compiler of a single frame. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpython" - ProfileFrameTypeKey = attribute.Key("profile.frame.type") -) - -// Enum values for profile.frame.type -var ( - // [.NET] - // - // Stability: development - // - // [.NET]: https://wikipedia.org/wiki/.NET - ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") - // [JVM] - // - // Stability: development - // - // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine - ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") - // [Kernel] - // - // Stability: development - // - // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) - ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") - // [C], [C++], [Go], [Rust] - // - // Stability: development - // - // [C]: https://wikipedia.org/wiki/C_(programming_language) - // [C++]: https://wikipedia.org/wiki/C%2B%2B - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") - // [Perl] - // - // Stability: development - // - // [Perl]: https://wikipedia.org/wiki/Perl - ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") - // [PHP] - // - // Stability: development - // - // [PHP]: https://wikipedia.org/wiki/PHP - ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") - // [Python] - // - // Stability: development - // - // [Python]: https://wikipedia.org/wiki/Python_(programming_language) - ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") - // [Ruby] - // - // Stability: development - // - // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) - ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") - // [V8JS] - // - // Stability: development - // - // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) - ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") - // [Erlang] - // - // Stability: development - // - // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) - ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") -) - -// Namespace: rpc -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes] of the Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [error codes]: https://connect.build/docs/protocol/#error-codes - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the - // [numeric status code] of the gRPC request. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` - // property of response if it is an error response. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Parse error", "User already exists" - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, string, - // `null` or missing (for notifications), value is expected to be cast to string - // for simplicity. Use empty string in case of `null` value. Omit entirely if - // this is a notification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10", "request-7", "" - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0", "1.0" - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It represents the mUST be calculated as two different - // counters starting from `1` one for sent messages and one for received - // message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" - // semantic conventions. It represents the whether this is a received or sent - // message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic - // conventions. It represents the name of the (logical) method being called, - // must be equal to the $method part in the span name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: exampleMethod - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function.name` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic - // conventions. It represents the full (logical) name of the service being - // called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myservice.EchoService - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic - // conventions. It represents a string identifying the remoting system. See - // below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCSystemKey = attribute.Key("rpc.system") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` -// property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property -// of request or response. Since protocol allows id to be int, string, `null` or -// missing (for notifications), value is expected to be cast to string for -// simplicity. Use empty string in case of `null` value. Omit entirely if this is -// a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version -// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't -// specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. 
-func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received message. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// Enum values for rpc.connect_rpc.error_code -var ( - // cancelled - // Stability: development - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - // Stability: development - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - // Stability: development - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - // Stability: development - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - // Stability: development - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - // Stability: development - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - // Stability: development - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - // Stability: development - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - // Stability: development - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - // Stability: development - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - // Stability: development - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - // Stability: development - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - // Stability: development - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - // Stability: development - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - // Stability: development - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - // Stability: development - RPCConnectRPCErrorCodeUnauthenticated = 
RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -// Enum values for rpc.grpc.status_code -var ( - // OK - // Stability: development - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - // Stability: development - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - // Stability: development - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - // Stability: development - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - // Stability: development - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - // Stability: development - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - // Stability: development - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - // Stability: development - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - // Stability: development - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - // Stability: development - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - // Stability: development - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - // Stability: development - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - // Stability: development - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - // Stability: development - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - // Stability: development - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - // Stability: development - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - // Stability: development - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Enum values for rpc.message.type -var ( - // sent - // Stability: development - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - // Stability: development - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -// Enum values for rpc.system -var ( - // gRPC - // Stability: development - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - // Stability: development - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - // Stability: development - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - // Stability: development - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - // Stability: development - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// Namespace: security_rule -const ( - // SecurityRuleCategoryKey is the attribute Key conforming to the - // "security_rule.category" semantic conventions. It represents a categorization - // value keyword used by the entity using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Attempted Information Leak" - SecurityRuleCategoryKey = attribute.Key("security_rule.category") - - // SecurityRuleDescriptionKey is the attribute Key conforming to the - // "security_rule.description" semantic conventions. It represents the - // description of the rule generating the event. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Block requests to public DNS over HTTPS / TLS protocols" - SecurityRuleDescriptionKey = attribute.Key("security_rule.description") - - // SecurityRuleLicenseKey is the attribute Key conforming to the - // "security_rule.license" semantic conventions. It represents the name of the - // license under which the rule used to generate this event is made available. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apache 2.0" - SecurityRuleLicenseKey = attribute.Key("security_rule.license") - - // SecurityRuleNameKey is the attribute Key conforming to the - // "security_rule.name" semantic conventions. It represents the name of the rule - // or signature generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BLOCK_DNS_over_TLS" - SecurityRuleNameKey = attribute.Key("security_rule.name") - - // SecurityRuleReferenceKey is the attribute Key conforming to the - // "security_rule.reference" semantic conventions. It represents the reference - // URL to additional information about the rule used to generate this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" - // Note: The URL can point to the vendor’s documentation about the rule. If - // that’s not available, it can also be a link to a more general page - // describing this type of alert. - SecurityRuleReferenceKey = attribute.Key("security_rule.reference") - - // SecurityRuleRulesetNameKey is the attribute Key conforming to the - // "security_rule.ruleset.name" semantic conventions. It represents the name of - // the ruleset, policy, group, or parent category in which the rule used to - // generate this event is a member. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Standard_Protocol_Filters" - SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") - - // SecurityRuleUUIDKey is the attribute Key conforming to the - // "security_rule.uuid" semantic conventions. It represents a rule ID that is - // unique within the scope of a set or group of agents, observers, or other - // entities using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" - SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") - - // SecurityRuleVersionKey is the attribute Key conforming to the - // "security_rule.version" semantic conventions. It represents the version / - // revision of the rule being used for analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.0.0" - SecurityRuleVersionKey = attribute.Key("security_rule.version") -) - -// SecurityRuleCategory returns an attribute KeyValue conforming to the -// "security_rule.category" semantic conventions. It represents a categorization -// value keyword used by the entity using the rule for detection of this event. -func SecurityRuleCategory(val string) attribute.KeyValue { - return SecurityRuleCategoryKey.String(val) -} - -// SecurityRuleDescription returns an attribute KeyValue conforming to the -// "security_rule.description" semantic conventions. 
It represents the -// description of the rule generating the event. -func SecurityRuleDescription(val string) attribute.KeyValue { - return SecurityRuleDescriptionKey.String(val) -} - -// SecurityRuleLicense returns an attribute KeyValue conforming to the -// "security_rule.license" semantic conventions. It represents the name of the -// license under which the rule used to generate this event is made available. -func SecurityRuleLicense(val string) attribute.KeyValue { - return SecurityRuleLicenseKey.String(val) -} - -// SecurityRuleName returns an attribute KeyValue conforming to the -// "security_rule.name" semantic conventions. It represents the name of the rule -// or signature generating the event. -func SecurityRuleName(val string) attribute.KeyValue { - return SecurityRuleNameKey.String(val) -} - -// SecurityRuleReference returns an attribute KeyValue conforming to the -// "security_rule.reference" semantic conventions. It represents the reference -// URL to additional information about the rule used to generate this event. -func SecurityRuleReference(val string) attribute.KeyValue { - return SecurityRuleReferenceKey.String(val) -} - -// SecurityRuleRulesetName returns an attribute KeyValue conforming to the -// "security_rule.ruleset.name" semantic conventions. It represents the name of -// the ruleset, policy, group, or parent category in which the rule used to -// generate this event is a member. -func SecurityRuleRulesetName(val string) attribute.KeyValue { - return SecurityRuleRulesetNameKey.String(val) -} - -// SecurityRuleUUID returns an attribute KeyValue conforming to the -// "security_rule.uuid" semantic conventions. It represents a rule ID that is -// unique within the scope of a set or group of agents, observers, or other -// entities using the rule for detection of this event. -func SecurityRuleUUID(val string) attribute.KeyValue { - return SecurityRuleUUIDKey.String(val) -} - -// SecurityRuleVersion returns an attribute KeyValue conforming to the -// "security_rule.version" semantic conventions. It represents the version / -// revision of the rule being used for analysis. -func SecurityRuleVersion(val string) attribute.KeyValue { - return SecurityRuleVersionKey.String(val) -} - -// Namespace: server -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.address` SHOULD represent the server address behind any - // intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" semantic - // conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.port` SHOULD represent the server port behind any - // intermediaries, for example proxies, if it's available. 
- ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the "server.address" -// semantic conventions. It represents the server domain name if available -// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// Namespace: service -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID of - // the service instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "627cc493-f310-47de-96bd-71410b7dec09" - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to - // distinguish instances of the same service that exist at the same time (e.g. - // instances of a horizontally scaled - // service). - // - // Implementations, such as SDKs, are recommended to generate a random Version 1 - // or Version 4 [RFC - // 4122] UUID, but are free to use an inherent unique ID as - // the source of - // this value if stability is desirable. In that case, the ID SHOULD be used as - // source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the purposes of - // identifying a service instance is - // needed. Similar to what can be seen in the man page for the - // [`/etc/machine-id`] file, the underlying - // data, such as pod name and namespace should be treated as confidential, being - // the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we do - // not recommend using one identifier - // for all processes participating in the application. Instead, it's recommended - // each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it can't - // unambiguously determine the - // service instance that is generating that telemetry. For instance, creating an - // UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container within - // that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, as - // they know the target address and - // port. - // - // [RFC - // 4122]: https://www.ietf.org/rfc/rfc4122.txt - // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" semantic - // conventions. It represents the logical name of the service. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "shoppingcart" - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. - // If `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - // - // [`process.executable.name`]: process.md - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Shop" - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace - // defined (so the empty/unspecified namespace is simply one more valid - // namespace). Zero-length namespace string is assumed equal to unspecified - // namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the "service.version" - // semantic conventions. It represents the version string of the service API or - // implementation. The format is not defined by these conventions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "2.0.0", "a01dbef8a" - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of the -// service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the "service.name" -// semantic conventions. It represents the logical name of the service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Namespace: session -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" semantic - // conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. 
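The `service.instance.id` note above recommends a random v4 UUID, or a v5 UUID derived from a stable inherent ID under the reserved namespace `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. A sketch of building a resource with the `service.*` helpers under those rules; `github.com/google/uuid` is used purely for illustration and is not necessarily a dependency of this module, and the name/namespace/version values are the examples given above:

package example

import (
	"github.com/google/uuid" // assumption: illustrative only, not necessarily a dependency of this module

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the semconv version vendored here
)

// instanceID follows the service.instance.id note: prefer a random v4 UUID, or
// derive a v5 UUID under the reserved namespace when a stable inherent ID exists.
func instanceID(stableID string) string {
	if stableID == "" {
		return uuid.NewString() // random v4
	}
	ns := uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
	return uuid.NewSHA1(ns, []byte(stableID)).String() // v5
}

// newResource builds a resource carrying the service.* attributes.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID(instanceID("")),
	)
}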
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// Namespace: signalr -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the signalR - // HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "app_shutdown", "timeout" - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the - // [SignalR transport type]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "web_sockets", "long_polling" - // - // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md - SignalrTransportKey = attribute.Key("signalr.transport") -) - -// Enum values for signalr.connection.status -var ( - // The connection was closed normally. - // Stability: stable - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout. - // Stability: stable - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down. - // Stability: stable - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -// Enum values for signalr.transport -var ( - // ServerSentEvents protocol - // Stability: stable - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - // Stability: stable - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - // Stability: stable - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Namespace: source -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the destination side, and when communicating through - // an intermediary, `source.address` SHOULD represent the source address behind - // any intermediaries, for example proxies, if it's available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" semantic - // conventions. It represents the source port number. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the "source.address" -// semantic conventions. It represents the source address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number. -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Namespace: system -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the logical - // CPU number [0..n-1]. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "(identifier)" - SystemDeviceKey = attribute.Key("system.device") - - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the filesystem - // mode. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "rw, ro" - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/mnt/data" - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the filesystem - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "used" - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the filesystem - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ext4" - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") - - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free", "cached" - SystemMemoryStateKey = attribute.Key("system.memory.state") - - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "in" - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory paging - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free" - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory paging - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "minor" - SystemPagingTypeKey = attribute.Key("system.paging.type") - - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State Codes]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "running" - // - // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1]. -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// SystemDevice returns an attribute KeyValue conforming to the "system.device" -// semantic conventions. It represents the device identifier. -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode. -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the -// "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path. 
-func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Enum values for system.filesystem.state -var ( - // used - // Stability: development - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - // Stability: development - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - // Stability: development - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -// Enum values for system.filesystem.type -var ( - // fat32 - // Stability: development - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - // Stability: development - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - // Stability: development - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - // Stability: development - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - // Stability: development - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - // Stability: development - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// Enum values for system.memory.state -var ( - // used - // Stability: development - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - // Stability: development - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // Deprecated: Removed, report shared memory usage with - // `metric.system.memory.shared` metric. - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - // Stability: development - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - // Stability: development - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Enum values for system.paging.direction -var ( - // in - // Stability: development - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - // Stability: development - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -// Enum values for system.paging.state -var ( - // used - // Stability: development - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - // Stability: development - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -// Enum values for system.paging.type -var ( - // major - // Stability: development - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - // Stability: development - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Enum values for system.process.status -var ( - // running - // Stability: development - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - // Stability: development - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - // Stability: development - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - // Stability: development - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Namespace: telemetry -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of the - // auto instrumentation agent or distribution, if used. 
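The `system.*` enum values above (filesystem state, memory state, paging direction, and so on) are intended to be attached to measurements rather than spans. A sketch that records memory usage split by state with the otel-go metric API; the meter and instrument names are illustrative, the byte counts are assumed to come from the caller, and the semconv import path is an assumption:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the semconv version vendored here
)

// recordMemory reports one sample per memory state, using the system.memory.state
// enum values above as attributes.
func recordMemory(ctx context.Context, usedBytes, freeBytes int64) error {
	gauge, err := otel.Meter("example/system").Int64Gauge("system.memory.usage", metric.WithUnit("By"))
	if err != nil {
		return err
	}
	gauge.Record(ctx, usedBytes, metric.WithAttributes(semconv.SystemMemoryStateUsed))
	gauge.Record(ctx, freeBytes, metric.WithAttributes(semconv.SystemMemoryStateFree))
	return nil
}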
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "parts-unlimited-java" - // Note: Official auto instrumentation agents and distributions SHOULD set the - // `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the version - // string of the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the language of - // the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "opentelemetry" - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to - // `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is used, - // this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module name of - // this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.2.3" - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. 
It represents the version string -// of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// Enum values for telemetry.sdk.language -var ( - // cpp - // Stability: stable - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - // Stability: stable - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - // Stability: stable - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - // Stability: stable - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - // Stability: stable - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - // Stability: stable - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - // Stability: stable - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - // Stability: stable - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - // Stability: stable - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - // Stability: stable - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - // Stability: stable - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - // Stability: stable - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// Namespace: test -const ( - // TestCaseNameKey is the attribute Key conforming to the "test.case.name" - // semantic conventions. It represents the fully qualified human readable name - // of the [test case]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", - // "ExampleTestCase1_test1" - // - // [test case]: https://wikipedia.org/wiki/Test_case - TestCaseNameKey = attribute.Key("test.case.name") - - // TestCaseResultStatusKey is the attribute Key conforming to the - // "test.case.result.status" semantic conventions. It represents the status of - // the actual test case result from test execution. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pass", "fail" - TestCaseResultStatusKey = attribute.Key("test.case.result.status") - - // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" - // semantic conventions. It represents the human readable name of a [test suite] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TestSuite1" - // - // [test suite]: https://wikipedia.org/wiki/Test_suite - TestSuiteNameKey = attribute.Key("test.suite.name") - - // TestSuiteRunStatusKey is the attribute Key conforming to the - // "test.suite.run.status" semantic conventions. It represents the status of the - // test suite run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "skipped", "aborted", "timed_out", - // "in_progress" - TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") -) - -// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" -// semantic conventions. It represents the fully qualified human readable name of -// the [test case]. 
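The `telemetry.sdk.*` attributes above are not meant to be set by application code: the note requires the SDK itself to set `telemetry.sdk.name` to `opentelemetry`, and the Go SDK already attaches them to its default resource. A small sketch that only inspects what the SDK provides:

package example

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/otel/sdk/resource"
)

// dumpSDKAttrs prints the telemetry.sdk.* attributes the Go SDK puts on its
// default resource, e.g. telemetry.sdk.name=opentelemetry, telemetry.sdk.language=go.
func dumpSDKAttrs() {
	for _, kv := range resource.Default().Attributes() {
		if strings.HasPrefix(string(kv.Key), "telemetry.sdk.") {
			fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
		}
	}
}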
-// -// [test case]: https://wikipedia.org/wiki/Test_case -func TestCaseName(val string) attribute.KeyValue { - return TestCaseNameKey.String(val) -} - -// TestSuiteName returns an attribute KeyValue conforming to the -// "test.suite.name" semantic conventions. It represents the human readable name -// of a [test suite]. -// -// [test suite]: https://wikipedia.org/wiki/Test_suite -func TestSuiteName(val string) attribute.KeyValue { - return TestSuiteNameKey.String(val) -} - -// Enum values for test.case.result.status -var ( - // pass - // Stability: development - TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") - // fail - // Stability: development - TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") -) - -// Enum values for test.suite.run.status -var ( - // success - // Stability: development - TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") - // failure - // Stability: development - TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") - // skipped - // Stability: development - TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") - // aborted - // Stability: development - TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") - // timed_out - // Stability: development - TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") - // in_progress - // Stability: development - TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") -) - -// Namespace: thread -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed to OS - // thread ID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic - // conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: main - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic -// conventions. It represents the current "managed" thread ID (as opposed to OS -// thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Namespace: tls -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic - // conventions. It represents the string indicating the [cipher] used during the - // current connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` - // of the [registered TLS Cipher Suits]. - // - // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 - // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. 
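The `test.*` attributes above can be attached to a span that wraps a single test case execution. A sketch using an illustrative tracer, suite, and case name; the pass/fail mapping uses the enum values defined above, and the semconv import path is an assumption:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the semconv version vendored here
)

// runTestCase wraps one test case in a span and records its outcome with the
// test.* attributes above.
func runTestCase(ctx context.Context, run func() bool) {
	_, span := otel.Tracer("example/tests").Start(ctx, "org.example.TestCase1.test1")
	defer span.End()

	span.SetAttributes(
		semconv.TestSuiteName("TestSuite1"),
		semconv.TestCaseName("org.example.TestCase1.test1"),
	)
	if run() {
		span.SetAttributes(semconv.TestCaseResultStatusPass)
	} else {
		span.SetAttributes(semconv.TestCaseResultStatusFail)
	}
}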
It represents the pEM-encoded - // stand-alone certificate offered by the client. This is usually - // mutually-exclusive of `client.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // client. This is usually mutually-exclusive of `client.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based on - // how they perform an SSL/TLS handshake. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the date/Time - // indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the array - // of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the given - // cipher, when applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "secp256r1" - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the "tls.established" - // semantic conventions. It represents the boolean flag indicating if the TLS - // negotiation was successful and transitioned to an encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" - // semantic conventions. It represents the string indicating the protocol being - // tunneled. Per the values in the [IANA registry], this string should be lower - // case. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "http/1.1" - // - // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" - // semantic conventions. It represents the normalized lowercase protocol name - // parsed from original string of the negotiated [SSL/TLS protocol version]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [SSL/TLS protocol version]: https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric part - // of the version parsed from the original string of the negotiated - // [SSL/TLS protocol version]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2", "3" - // - // [SSL/TLS protocol version]: https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic - // conventions. It represents the boolean flag indicating if this TLS connection - // was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the pEM-encoded - // stand-alone certificate offered by the server. This is usually - // mutually-exclusive of `server.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // server. This is usually mutually-exclusive of `server.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. 
It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" - // semantic conventions. It represents a hash that identifies servers based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the date/Time - // indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the [cipher] used -// during the current connection. -// -// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also exists -// in that list. 
-func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// client. This is usually mutually-exclusive of `client.certificate` since that -// value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the client. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" -// semantic conventions. It represents a hash that identifies clients based on -// how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. 
-func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic -// conventions. It represents the string indicating the curve used for the given -// cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string indicating -// the protocol being tunneled. Per the values in the [IANA registry], this -// string should be lower case. -// -// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part of -// the version parsed from the original string of the negotiated -// [SSL/TLS protocol version]. -// -// [SSL/TLS protocol version]: https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also exists -// in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// server. This is usually mutually-exclusive of `server.certificate` since that -// value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the server. 
For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Enum values for tls.protocol.name -var ( - // ssl - // Stability: development - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - // Stability: development - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// Namespace: url -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" semantic - // conventions. It represents the domain extracted from the `url.full`, such as - // "opentelemetry.io". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", - // "[1080:0:0:0:8:800:200C:417A]" - // Note: In some cases a URL may refer to an IP and/or port directly, without a - // domain name. 
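Most of the `tls.*` helpers above map fairly directly onto `crypto/tls.ConnectionState`. A sketch of deriving a handful of them after a handshake; the protocol-version mapping is approximate (`tls.VersionName`, Go 1.21+, returns e.g. "TLS 1.3" while the convention wants just the numeric part), and the semconv import path is an assumption:

package example

import (
	"crypto/tls"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the semconv version vendored here
)

// tlsAttributes derives a few tls.* attributes from a completed handshake.
func tlsAttributes(cs tls.ConnectionState) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.TLSEstablished(cs.HandshakeComplete),
		semconv.TLSResumed(cs.DidResume),
		semconv.TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
		// Approximate: "TLS 1.3" -> "1.3"; the convention expects the numeric part only.
		semconv.TLSProtocolVersion(strings.TrimPrefix(tls.VersionName(cs.Version), "TLS ")),
	}
	if cs.NegotiatedProtocol != "" {
		attrs = append(attrs, semconv.TLSNextProtocol(cs.NegotiatedProtocol))
	}
	return attrs
}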
In this case, the IP address would go to the domain field. If - // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` - // and `]` characters should also be captured in the domain field. - // - // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from the - // `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: The file extension is only set if it exists, as not every url has a - // file extension. When the file name has multiple extensions `example.tar.gz`, - // only the last one should be captured `gz`, not `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic - // conventions. It represents the [URI fragment] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SemConv" - // - // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network resource - // according to [RFC3986]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the fragment - // is not transmitted over HTTP, but if it is known, it SHOULD be included - // nevertheless. - // - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. - // In such case username and password SHOULD be redacted and attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // - // `url.full` SHOULD capture the absolute URL when it is available (or can be - // reconstructed). - // - // Sensitive content provided in `url.full` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the - // value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `https://www.example.com/path?color=blue&sig=REDACTED`. - // - // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" semantic - // conventions. 
It represents the unmodified original URL as seen in the event - // source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", - // "search?q=OpenTelemetry" - // Note: In network monitoring, the observed URL may be a full URL, whereas in - // access logs, the URL is often just represented as a path. This field is meant - // to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. - URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI path] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/search" - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - // - // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI query] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "q=OpenTelemetry" - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `q=OpenTelemetry&sig=REDACTED`. - // - // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.com", "foo.co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // For example, the registered domain for `foo.example.com` is `example.com`. - // Trying to approximate this by simply taking the last two labels will not work - // well for TLDs such as `co.uk`. 
- // - // [public suffix list]: http://publicsuffix.org - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic - // conventions. It represents the [URI scheme] component identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https", "ftp", "telnet" - // - // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name under - // the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain contains - // all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "east", "sub2.sub1" - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the - // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the - // subdomain field should contain `sub2.sub1`, with no trailing period. - URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" semantic - // conventions. It represents the low-cardinality template of an - // [absolute path reference]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/users/{id}", "/users/:id", "/users?id={id}" - // - // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective top - // level domain (eTLD), also known as the domain suffix, is the last part of the - // domain name. For example, the top level domain for example.com is `com`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com", "co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // - // [public suffix list]: http://publicsuffix.org - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the `url.full`, -// such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the "url.extension" -// semantic conventions. It represents the file extension extracted from the -// `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the "url.fragment" -// semantic conventions. It represents the [URI fragment] component. -// -// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" semantic -// conventions. 
It represents the absolute URL describing a network resource -// according to [RFC3986]. -// -// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the "url.original" -// semantic conventions. It represents the unmodified original URL as seen in the -// event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" semantic -// conventions. It represents the [URI path] component. -// -// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" semantic -// conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic -// conventions. It represents the [URI query] component. -// -// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI scheme] component identifying the -// used protocol. -// -// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" -// semantic conventions. It represents the subdomain portion of a fully qualified -// domain name includes all of the names except the host name under the -// registered_domain. In a partially qualified domain, or if the qualification -// level of the full name cannot be determined, subdomain contains all of the -// names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the "url.template" -// semantic conventions. It represents the low-cardinality template of an -// [absolute path reference]. -// -// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of the -// domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Namespace: user -const ( - // UserEmailKey is the attribute Key conforming to the "user.email" semantic - // conventions. It represents the user email address. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein@example.com" - UserEmailKey = attribute.Key("user.email") - - // UserFullNameKey is the attribute Key conforming to the "user.full_name" - // semantic conventions. It represents the user's full name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Albert Einstein" - UserFullNameKey = attribute.Key("user.full_name") - - // UserHashKey is the attribute Key conforming to the "user.hash" semantic - // conventions. It represents the unique user hash to correlate information for - // a user in anonymized form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" - // Note: Useful if `user.id` or `user.name` contain confidential information and - // cannot be used. - UserHashKey = attribute.Key("user.hash") - - // UserIDKey is the attribute Key conforming to the "user.id" semantic - // conventions. It represents the unique identifier of the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" - UserIDKey = attribute.Key("user.id") - - // UserNameKey is the attribute Key conforming to the "user.name" semantic - // conventions. It represents the short name or login/username of the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein" - UserNameKey = attribute.Key("user.name") - - // UserRolesKey is the attribute Key conforming to the "user.roles" semantic - // conventions. It represents the array of user roles at the time of the event. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "admin", "reporting_user" - UserRolesKey = attribute.Key("user.roles") -) - -// UserEmail returns an attribute KeyValue conforming to the "user.email" -// semantic conventions. It represents the user email address. -func UserEmail(val string) attribute.KeyValue { - return UserEmailKey.String(val) -} - -// UserFullName returns an attribute KeyValue conforming to the "user.full_name" -// semantic conventions. It represents the user's full name. -func UserFullName(val string) attribute.KeyValue { - return UserFullNameKey.String(val) -} - -// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic -// conventions. It represents the unique user hash to correlate information for a -// user in anonymized form. -func UserHash(val string) attribute.KeyValue { - return UserHashKey.String(val) -} - -// UserID returns an attribute KeyValue conforming to the "user.id" semantic -// conventions. It represents the unique identifier of the user. -func UserID(val string) attribute.KeyValue { - return UserIDKey.String(val) -} - -// UserName returns an attribute KeyValue conforming to the "user.name" semantic -// conventions. It represents the short name or login/username of the user. -func UserName(val string) attribute.KeyValue { - return UserNameKey.String(val) -} - -// UserRoles returns an attribute KeyValue conforming to the "user.roles" -// semantic conventions. It represents the array of user roles at the time of the -// event. 
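For context on how the user.* helpers above are consumed, a minimal sketch of attaching them to a span, assuming the go.opentelemetry.io/otel API already in go.mod; the tracer name is a placeholder and the attribute values mirror the Examples in the comments:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

func handleLogin(ctx context.Context) {
	// Placeholder tracer and span names.
	_, span := otel.Tracer("example").Start(ctx, "login")
	defer span.End()

	span.SetAttributes(
		semconv.UserID("S-1-5-21-202424912787-2692429404-2351956786-1000"),
		semconv.UserName("a.einstein"),
		semconv.UserRoles("admin", "reporting_user"),
	)
}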
-func UserRoles(val ...string) attribute.KeyValue { - return UserRolesKey.StringSlice(val) -} - -// Namespace: user_agent -const ( - // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" - // semantic conventions. It represents the name of the user-agent extracted from - // original. Usually refers to the browser's name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Safari", "YourApp" - // Note: [Example] of extracting browser's name from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant name SHOULD be selected. In such a scenario it should align with - // `user_agent.version` - // - // [Example]: https://www.whatsmyua.info - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of the - // [HTTP User-Agent] header sent by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 - // grpc-java-okhttp/1.27.2" - // - // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentSyntheticTypeKey is the attribute Key conforming to the - // "user_agent.synthetic.type" semantic conventions. It represents the specifies - // the category of synthetic traffic, such as tests or bots. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute MAY be derived from the contents of the - // `user_agent.original` attribute. Components that populate the attribute are - // responsible for determining what they consider to be synthetic bot or test - // traffic. This attribute can either be set for self-identification purposes, - // or on telemetry detected to be generated as a result of a synthetic request. - // This attribute is useful for distinguishing between genuine client traffic - // and synthetic traffic generated by bots or tests. - UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of the - // user-agent extracted from original. Usually refers to the browser's version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.1.2", "1.0.0" - // Note: [Example] of extracting browser's version from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant version SHOULD be selected. In such a scenario it should align - // with `user_agent.name` - // - // [Example]: https://www.whatsmyua.info - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. 
-func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP User-Agent] header sent by the client. -// -// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version. -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// Enum values for user_agent.synthetic.type -var ( - // Bot source. - // Stability: development - UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") - // Synthetic test source. - // Stability: development - UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") -) - -// Namespace: vcs -const ( - // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" - // semantic conventions. It represents the ID of the change (pull request/merge - // request/changelist) if applicable. This is usually a unique (within - // repository) identifier generated by the VCS system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - VCSChangeIDKey = attribute.Key("vcs.change.id") - - // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" - // semantic conventions. It represents the state of the change (pull - // request/merge request/changelist). - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "open", "closed", "merged" - VCSChangeStateKey = attribute.Key("vcs.change.state") - - // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" - // semantic conventions. It represents the human readable title of the change - // (pull request/merge request/changelist). This title is often a brief summary - // of the change and may get merged in to a ref as the commit summary. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update - // dependency" - VCSChangeTitleKey = attribute.Key("vcs.change.title") - - // VCSLineChangeTypeKey is the attribute Key conforming to the - // "vcs.line_change.type" semantic conventions. It represents the type of line - // change being measured on a branch or change. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "added", "removed" - VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") - - // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. 
- // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") - - // VCSRefBaseRevisionKey is the attribute Key conforming to the - // "vcs.ref.base.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. The - // revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.base.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") - - // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") - - // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") - - // VCSRefHeadRevisionKey is the attribute Key conforming to the - // "vcs.ref.head.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `head` refers to where you are right now; the current reference at a - // given time.The revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.head.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") - - // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") - - // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic - // conventions. It represents the type of the [reference] in the repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefTypeKey = attribute.Key("vcs.ref.type") - - // VCSRepositoryNameKey is the attribute Key conforming to the - // "vcs.repository.name" semantic conventions. It represents the human readable - // name of the repository. It SHOULD NOT include any additional identifier like - // Group/SubGroup in GitLab or organization in GitHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "semantic-conventions", "my-cool-repo" - // Note: Due to it only being the name, it can clash with forks of the same - // repository if collecting telemetry across multiple orgs or groups in - // the same backends. - VCSRepositoryNameKey = attribute.Key("vcs.repository.name") - - // VCSRepositoryURLFullKey is the attribute Key conforming to the - // "vcs.repository.url.full" semantic conventions. It represents the - // [canonical URL] of the repository providing the complete HTTP(S) address in - // order to locate and identify the repository through a browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/opentelemetry/open-telemetry-collector-contrib", - // "https://gitlab.com/my-org/my-project/my-projects-project/repo" - // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include - // the `.git` extension. 
- // - // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. - VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") - - // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the - // "vcs.revision_delta.direction" semantic conventions. It represents the type - // of revision comparison. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ahead", "behind" - VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") -) - -// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" -// semantic conventions. It represents the ID of the change (pull request/merge -// request/changelist) if applicable. This is usually a unique (within -// repository) identifier generated by the VCS system. -func VCSChangeID(val string) attribute.KeyValue { - return VCSChangeIDKey.String(val) -} - -// VCSChangeTitle returns an attribute KeyValue conforming to the -// "vcs.change.title" semantic conventions. It represents the human readable -// title of the change (pull request/merge request/changelist). This title is -// often a brief summary of the change and may get merged in to a ref as the -// commit summary. -func VCSChangeTitle(val string) attribute.KeyValue { - return VCSChangeTitleKey.String(val) -} - -// VCSRefBaseName returns an attribute KeyValue conforming to the -// "vcs.ref.base.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefBaseName(val string) attribute.KeyValue { - return VCSRefBaseNameKey.String(val) -} - -// VCSRefBaseRevision returns an attribute KeyValue conforming to the -// "vcs.ref.base.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefBaseRevision(val string) attribute.KeyValue { - return VCSRefBaseRevisionKey.String(val) -} - -// VCSRefHeadName returns an attribute KeyValue conforming to the -// "vcs.ref.head.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefHeadName(val string) attribute.KeyValue { - return VCSRefHeadNameKey.String(val) -} - -// VCSRefHeadRevision returns an attribute KeyValue conforming to the -// "vcs.ref.head.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefHeadRevision(val string) attribute.KeyValue { - return VCSRefHeadRevisionKey.String(val) -} - -// VCSRepositoryName returns an attribute KeyValue conforming to the -// "vcs.repository.name" semantic conventions. It represents the human readable -// name of the repository. It SHOULD NOT include any additional identifier like -// Group/SubGroup in GitLab or organization in GitHub. 
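Similarly, a rough sketch of stamping the vcs.* attributes above onto a CI span; the tracer and span names are placeholders and the values come from the Examples in this file:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

func recordPipelineRun(ctx context.Context) {
	_, span := otel.Tracer("ci").Start(ctx, "pipeline.run")
	defer span.End()

	span.SetAttributes(
		semconv.VCSRepositoryName("semantic-conventions"),
		semconv.VCSRefHeadName("my-feature-branch"),
		semconv.VCSRefHeadRevision("9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"),
		semconv.VCSChangeID("123"),
	)
}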
-func VCSRepositoryName(val string) attribute.KeyValue { - return VCSRepositoryNameKey.String(val) -} - -// VCSRepositoryURLFull returns an attribute KeyValue conforming to the -// "vcs.repository.url.full" semantic conventions. It represents the -// [canonical URL] of the repository providing the complete HTTP(S) address in -// order to locate and identify the repository through a browser. -// -// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. -func VCSRepositoryURLFull(val string) attribute.KeyValue { - return VCSRepositoryURLFullKey.String(val) -} - -// Enum values for vcs.change.state -var ( - // Open means the change is currently active and under review. It hasn't been - // merged into the target branch yet, and it's still possible to make changes or - // add comments. - // Stability: development - VCSChangeStateOpen = VCSChangeStateKey.String("open") - // WIP (work-in-progress, draft) means the change is still in progress and not - // yet ready for a full review. It might still undergo significant changes. - // Stability: development - VCSChangeStateWip = VCSChangeStateKey.String("wip") - // Closed means the merge request has been closed without merging. This can - // happen for various reasons, such as the changes being deemed unnecessary, the - // issue being resolved in another way, or the author deciding to withdraw the - // request. - // Stability: development - VCSChangeStateClosed = VCSChangeStateKey.String("closed") - // Merged indicates that the change has been successfully integrated into the - // target codebase. - // Stability: development - VCSChangeStateMerged = VCSChangeStateKey.String("merged") -) - -// Enum values for vcs.line_change.type -var ( - // How many lines were added. - // Stability: development - VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") - // How many lines were removed. - // Stability: development - VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") -) - -// Enum values for vcs.ref.base.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") -) - -// Enum values for vcs.ref.head.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") -) - -// Enum values for vcs.ref.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefTypeBranch = VCSRefTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefTypeTag = VCSRefTypeKey.String("tag") -) - -// Enum values for vcs.revision_delta.direction -var ( - // How many revisions the change is behind the target ref. 
- // Stability: development - VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") - // How many revisions the change is ahead of the target ref. - // Stability: development - VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") -) - -// Namespace: webengine -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the additional - // description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final" - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly" - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of the - // web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "21.0.0" - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" -// semantic conventions. It represents the name of the web engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the web -// engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/doc.go deleted file mode 100644 index 787f5b0f..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.30.0 -// version of the OpenTelemetry semantic conventions. 
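The package comment above ties this copy to the v1.30.0 conventions; because the release is encoded in the import path, different semconv releases can be imported side by side, so only code importing the v1.30.0 path is touched by removing this vendored copy. A small sketch, with v1.26.0 chosen purely as an illustration of another released version:

package example

import (
	semconv126 "go.opentelemetry.io/otel/semconv/v1.26.0"
	semconv130 "go.opentelemetry.io/otel/semconv/v1.30.0"
)

// Each semconv release exports its own schema URL; both can be referenced in one build.
var schemaURLs = []string{
	semconv126.SchemaURL,
	semconv130.SchemaURL,
}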
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/exception.go deleted file mode 100644 index 4332a795..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/metric.go deleted file mode 100644 index fe6beb91..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/metric.go +++ /dev/null @@ -1,1750 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0" - -const ( - // AzureCosmosDBClientActiveInstanceCount is the metric conforming to the - // "azure.cosmosdb.client.active_instance.count" semantic conventions. It - // represents the number of active client instances. - // Instrument: updowncounter - // Unit: {instance} - // Stability: development - AzureCosmosDBClientActiveInstanceCountName = "azure.cosmosdb.client.active_instance.count" - AzureCosmosDBClientActiveInstanceCountUnit = "{instance}" - AzureCosmosDBClientActiveInstanceCountDescription = "Number of active client instances" - // AzureCosmosDBClientOperationRequestCharge is the metric conforming to the - // "azure.cosmosdb.client.operation.request_charge" semantic conventions. It - // represents the [Request units] consumed by the operation. - // - // [Request units]: https://learn.microsoft.com/azure/cosmos-db/request-units - // Instrument: histogram - // Unit: {request_unit} - // Stability: development - AzureCosmosDBClientOperationRequestChargeName = "azure.cosmosdb.client.operation.request_charge" - AzureCosmosDBClientOperationRequestChargeUnit = "{request_unit}" - AzureCosmosDBClientOperationRequestChargeDescription = "[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation" - // CICDPipelineRunActive is the metric conforming to the - // "cicd.pipeline.run.active" semantic conventions. It represents the number of - // pipeline runs currently active in the system by state. - // Instrument: updowncounter - // Unit: {run} - // Stability: development - CICDPipelineRunActiveName = "cicd.pipeline.run.active" - CICDPipelineRunActiveUnit = "{run}" - CICDPipelineRunActiveDescription = "The number of pipeline runs currently active in the system by state." - // CICDPipelineRunDuration is the metric conforming to the - // "cicd.pipeline.run.duration" semantic conventions. It represents the - // duration of a pipeline run grouped by pipeline, state and result. - // Instrument: histogram - // Unit: s - // Stability: development - CICDPipelineRunDurationName = "cicd.pipeline.run.duration" - CICDPipelineRunDurationUnit = "s" - CICDPipelineRunDurationDescription = "Duration of a pipeline run grouped by pipeline, state and result." - // CICDPipelineRunErrors is the metric conforming to the - // "cicd.pipeline.run.errors" semantic conventions. It represents the number of - // errors encountered in pipeline runs (eg. 
compile, test failures). - // Instrument: counter - // Unit: {error} - // Stability: development - CICDPipelineRunErrorsName = "cicd.pipeline.run.errors" - CICDPipelineRunErrorsUnit = "{error}" - CICDPipelineRunErrorsDescription = "The number of errors encountered in pipeline runs (eg. compile, test failures)." - // CICDSystemErrors is the metric conforming to the "cicd.system.errors" - // semantic conventions. It represents the number of errors in a component of - // the CICD system (eg. controller, scheduler, agent). - // Instrument: counter - // Unit: {error} - // Stability: development - CICDSystemErrorsName = "cicd.system.errors" - CICDSystemErrorsUnit = "{error}" - CICDSystemErrorsDescription = "The number of errors in a component of the CICD system (eg. controller, scheduler, agent)." - // CICDWorkerCount is the metric conforming to the "cicd.worker.count" semantic - // conventions. It represents the number of workers on the CICD system by - // state. - // Instrument: updowncounter - // Unit: {count} - // Stability: development - CICDWorkerCountName = "cicd.worker.count" - CICDWorkerCountUnit = "{count}" - CICDWorkerCountDescription = "The number of workers on the CICD system by state." - // ContainerCPUTime is the metric conforming to the "container.cpu.time" - // semantic conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: development - ContainerCPUTimeName = "container.cpu.time" - ContainerCPUTimeUnit = "s" - ContainerCPUTimeDescription = "Total CPU time consumed" - // ContainerCPUUsage is the metric conforming to the "container.cpu.usage" - // semantic conventions. It represents the container's CPU usage, measured in - // cpus. Range from 0 to the number of allocatable CPUs. - // Instrument: gauge - // Unit: {cpu} - // Stability: development - ContainerCPUUsageName = "container.cpu.usage" - ContainerCPUUsageUnit = "{cpu}" - ContainerCPUUsageDescription = "Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs" - // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic - // conventions. It represents the disk bytes for the container. - // Instrument: counter - // Unit: By - // Stability: development - ContainerDiskIoName = "container.disk.io" - ContainerDiskIoUnit = "By" - ContainerDiskIoDescription = "Disk bytes for the container." - // ContainerMemoryUsage is the metric conforming to the - // "container.memory.usage" semantic conventions. It represents the memory - // usage of the container. - // Instrument: counter - // Unit: By - // Stability: development - ContainerMemoryUsageName = "container.memory.usage" - ContainerMemoryUsageUnit = "By" - ContainerMemoryUsageDescription = "Memory usage of the container." - // ContainerNetworkIo is the metric conforming to the "container.network.io" - // semantic conventions. It represents the network bytes for the container. - // Instrument: counter - // Unit: By - // Stability: development - ContainerNetworkIoName = "container.network.io" - ContainerNetworkIoUnit = "By" - ContainerNetworkIoDescription = "Network bytes for the container." - // ContainerUptime is the metric conforming to the "container.uptime" semantic - // conventions. It represents the time the container has been running. 
- // Instrument: gauge - // Unit: s - // Stability: development - ContainerUptimeName = "container.uptime" - ContainerUptimeUnit = "s" - ContainerUptimeDescription = "The time the container has been running" - // DBClientConnectionCount is the metric conforming to the - // "db.client.connection.count" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - DBClientConnectionCountName = "db.client.connection.count" - DBClientConnectionCountUnit = "{connection}" - DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" - // DBClientConnectionCreateTime is the metric conforming to the - // "db.client.connection.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: s - // Stability: development - DBClientConnectionCreateTimeName = "db.client.connection.create_time" - DBClientConnectionCreateTimeUnit = "s" - DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" - // DBClientConnectionIdleMax is the metric conforming to the - // "db.client.connection.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - DBClientConnectionIdleMaxName = "db.client.connection.idle.max" - DBClientConnectionIdleMaxUnit = "{connection}" - DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" - // DBClientConnectionIdleMin is the metric conforming to the - // "db.client.connection.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - DBClientConnectionIdleMinName = "db.client.connection.idle.min" - DBClientConnectionIdleMinUnit = "{connection}" - DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" - // DBClientConnectionMax is the metric conforming to the - // "db.client.connection.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - DBClientConnectionMaxName = "db.client.connection.max" - DBClientConnectionMaxUnit = "{connection}" - DBClientConnectionMaxDescription = "The maximum number of open connections allowed" - // DBClientConnectionPendingRequests is the metric conforming to the - // "db.client.connection.pending_requests" semantic conventions. It represents - // the number of current pending requests for an open connection. - // Instrument: updowncounter - // Unit: {request} - // Stability: development - DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" - DBClientConnectionPendingRequestsUnit = "{request}" - DBClientConnectionPendingRequestsDescription = "The number of current pending requests for an open connection" - // DBClientConnectionTimeouts is the metric conforming to the - // "db.client.connection.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. 
- // Instrument: counter - // Unit: {timeout} - // Stability: development - DBClientConnectionTimeoutsName = "db.client.connection.timeouts" - DBClientConnectionTimeoutsUnit = "{timeout}" - DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - // DBClientConnectionUseTime is the metric conforming to the - // "db.client.connection.use_time" semantic conventions. It represents the time - // between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: s - // Stability: development - DBClientConnectionUseTimeName = "db.client.connection.use_time" - DBClientConnectionUseTimeUnit = "s" - DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - // DBClientConnectionWaitTime is the metric conforming to the - // "db.client.connection.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: s - // Stability: development - DBClientConnectionWaitTimeName = "db.client.connection.wait_time" - DBClientConnectionWaitTimeUnit = "s" - DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.create_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: development - // Deprecated: Replaced by `db.client.connection.create_time`. Note: the unit also changed from `ms` to `s`. - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - // Deprecated: Replaced by `db.client.connection.idle.max`. - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // deprecated, use `db.client.connection.idle.min` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - // Deprecated: Replaced by `db.client.connection.idle.min`. - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the - // deprecated, use `db.client.connection.max` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - // Deprecated: Replaced by `db.client.connection.max`. 
- DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the deprecated, use `db.client.connection.pending_requests` instead. - // Instrument: updowncounter - // Unit: {request} - // Stability: development - // Deprecated: Replaced by `db.client.connection.pending_requests`. - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // deprecated, use `db.client.connection.timeouts` instead. - // Instrument: counter - // Unit: {timeout} - // Stability: development - // Deprecated: Replaced by `db.client.connection.timeouts`. - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the - // deprecated, use `db.client.connection.count` instead. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - // Deprecated: Replaced by `db.client.connection.count`. - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.use_time` instead. Note: the unit also - // changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: development - // Deprecated: Replaced by `db.client.connection.use_time`. Note: the unit also changed from `ms` to `s`. - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // deprecated, use `db.client.connection.wait_time` instead. Note: the unit - // also changed from `ms` to `s`. - // Instrument: histogram - // Unit: ms - // Stability: development - // Deprecated: Replaced by `db.client.connection.wait_time`. Note: the unit also changed from `ms` to `s`. - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." - // DBClientCosmosDBActiveInstanceCount is the metric conforming to the - // "db.client.cosmosdb.active_instance.count" semantic conventions. 
It - // represents the deprecated, use `azure.cosmosdb.client.active_instance.count` - // instead. - // Instrument: updowncounter - // Unit: {instance} - // Stability: development - // Deprecated: Replaced by `azure.cosmosdb.client.active_instance.count`. - DBClientCosmosDBActiveInstanceCountName = "db.client.cosmosdb.active_instance.count" - DBClientCosmosDBActiveInstanceCountUnit = "{instance}" - DBClientCosmosDBActiveInstanceCountDescription = "Deprecated, use `azure.cosmosdb.client.active_instance.count` instead." - // DBClientCosmosDBOperationRequestCharge is the metric conforming to the - // "db.client.cosmosdb.operation.request_charge" semantic conventions. It - // represents the deprecated, use - // `azure.cosmosdb.client.operation.request_charge` instead. - // Instrument: histogram - // Unit: {request_unit} - // Stability: development - // Deprecated: Replaced by `azure.cosmosdb.client.operation.request_charge`. - DBClientCosmosDBOperationRequestChargeName = "db.client.cosmosdb.operation.request_charge" - DBClientCosmosDBOperationRequestChargeUnit = "{request_unit}" - DBClientCosmosDBOperationRequestChargeDescription = "Deprecated, use `azure.cosmosdb.client.operation.request_charge` instead." - // DBClientOperationDuration is the metric conforming to the - // "db.client.operation.duration" semantic conventions. It represents the - // duration of database client operations. - // Instrument: histogram - // Unit: s - // Stability: release_candidate - DBClientOperationDurationName = "db.client.operation.duration" - DBClientOperationDurationUnit = "s" - DBClientOperationDurationDescription = "Duration of database client operations." - // DBClientResponseReturnedRows is the metric conforming to the - // "db.client.response.returned_rows" semantic conventions. It represents the - // actual number of records returned by the database operation. - // Instrument: histogram - // Unit: {row} - // Stability: development - DBClientResponseReturnedRowsName = "db.client.response.returned_rows" - DBClientResponseReturnedRowsUnit = "{row}" - DBClientResponseReturnedRowsDescription = "The actual number of records returned by the database operation." - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: development - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: development - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: development - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. 
- // Instrument: counter - // Unit: {error} - // Stability: development - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: development - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: development - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: development - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: development - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. - // Instrument: histogram - // Unit: By - // Stability: development - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: development - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - // GenAIClientOperationDuration is the metric conforming to the - // "gen_ai.client.operation.duration" semantic conventions. It represents the - // genAI operation duration. - // Instrument: histogram - // Unit: s - // Stability: development - GenAIClientOperationDurationName = "gen_ai.client.operation.duration" - GenAIClientOperationDurationUnit = "s" - GenAIClientOperationDurationDescription = "GenAI operation duration" - // GenAIClientTokenUsage is the metric conforming to the - // "gen_ai.client.token.usage" semantic conventions. It represents the measures - // number of input and output tokens used. - // Instrument: histogram - // Unit: {token} - // Stability: development - GenAIClientTokenUsageName = "gen_ai.client.token.usage" - GenAIClientTokenUsageUnit = "{token}" - GenAIClientTokenUsageDescription = "Measures number of input and output tokens used" - // GenAIServerRequestDuration is the metric conforming to the - // "gen_ai.server.request.duration" semantic conventions. 
It represents the - // generative AI server request duration such as time-to-last byte or last - // output token. - // Instrument: histogram - // Unit: s - // Stability: development - GenAIServerRequestDurationName = "gen_ai.server.request.duration" - GenAIServerRequestDurationUnit = "s" - GenAIServerRequestDurationDescription = "Generative AI server request duration such as time-to-last byte or last output token" - // GenAIServerTimePerOutputToken is the metric conforming to the - // "gen_ai.server.time_per_output_token" semantic conventions. It represents - // the time per output token generated after the first token for successful - // responses. - // Instrument: histogram - // Unit: s - // Stability: development - GenAIServerTimePerOutputTokenName = "gen_ai.server.time_per_output_token" - GenAIServerTimePerOutputTokenUnit = "s" - GenAIServerTimePerOutputTokenDescription = "Time per output token generated after the first token for successful responses" - // GenAIServerTimeToFirstToken is the metric conforming to the - // "gen_ai.server.time_to_first_token" semantic conventions. It represents the - // time to generate first token for successful responses. - // Instrument: histogram - // Unit: s - // Stability: development - GenAIServerTimeToFirstTokenName = "gen_ai.server.time_to_first_token" - GenAIServerTimeToFirstTokenUnit = "s" - GenAIServerTimeToFirstTokenDescription = "Time to generate first token for successful responses" - // GoConfigGogc is the metric conforming to the "go.config.gogc" semantic - // conventions. It represents the heap size target percentage configured by the - // user, otherwise 100. - // Instrument: updowncounter - // Unit: % - // Stability: development - GoConfigGogcName = "go.config.gogc" - GoConfigGogcUnit = "%" - GoConfigGogcDescription = "Heap size target percentage configured by the user, otherwise 100." - // GoGoroutineCount is the metric conforming to the "go.goroutine.count" - // semantic conventions. It represents the count of live goroutines. - // Instrument: updowncounter - // Unit: {goroutine} - // Stability: development - GoGoroutineCountName = "go.goroutine.count" - GoGoroutineCountUnit = "{goroutine}" - GoGoroutineCountDescription = "Count of live goroutines." - // GoMemoryAllocated is the metric conforming to the "go.memory.allocated" - // semantic conventions. It represents the memory allocated to the heap by the - // application. - // Instrument: counter - // Unit: By - // Stability: development - GoMemoryAllocatedName = "go.memory.allocated" - GoMemoryAllocatedUnit = "By" - GoMemoryAllocatedDescription = "Memory allocated to the heap by the application." - // GoMemoryAllocations is the metric conforming to the "go.memory.allocations" - // semantic conventions. It represents the count of allocations to the heap by - // the application. - // Instrument: counter - // Unit: {allocation} - // Stability: development - GoMemoryAllocationsName = "go.memory.allocations" - GoMemoryAllocationsUnit = "{allocation}" - GoMemoryAllocationsDescription = "Count of allocations to the heap by the application." - // GoMemoryGCGoal is the metric conforming to the "go.memory.gc.goal" semantic - // conventions. It represents the heap size target for the end of the GC cycle. - // Instrument: updowncounter - // Unit: By - // Stability: development - GoMemoryGCGoalName = "go.memory.gc.goal" - GoMemoryGCGoalUnit = "By" - GoMemoryGCGoalDescription = "Heap size target for the end of the GC cycle." 
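Each metric in this generated file is exposed as a Name/Unit/Description constant triple, which is normally passed straight to an instrument constructor so the emitted metric stays aligned with the semantic conventions. A minimal sketch of that pattern for the go.goroutine.count updowncounter listed above, assuming a semconv release that still exports these constants (the v1.26.0 import path and the meter name are illustrative, not taken from this repository):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// reportGoroutineDelta registers the go.goroutine.count updowncounter from the
// generated Name/Unit/Description constants, then records a delta.
// The meter name is illustrative only.
func reportGoroutineDelta(ctx context.Context, delta int64) error {
	meter := otel.Meter("example/runtime")
	goroutines, err := meter.Int64UpDownCounter(
		semconv.GoGoroutineCountName,
		metric.WithUnit(semconv.GoGoroutineCountUnit),
		metric.WithDescription(semconv.GoGoroutineCountDescription),
	)
	if err != nil {
		return err
	}
	goroutines.Add(ctx, delta)
	return nil
}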
- // GoMemoryLimit is the metric conforming to the "go.memory.limit" semantic - // conventions. It represents the go runtime memory limit configured by the - // user, if a limit exists. - // Instrument: updowncounter - // Unit: By - // Stability: development - GoMemoryLimitName = "go.memory.limit" - GoMemoryLimitUnit = "By" - GoMemoryLimitDescription = "Go runtime memory limit configured by the user, if a limit exists." - // GoMemoryUsed is the metric conforming to the "go.memory.used" semantic - // conventions. It represents the memory used by the Go runtime. - // Instrument: updowncounter - // Unit: By - // Stability: development - GoMemoryUsedName = "go.memory.used" - GoMemoryUsedUnit = "By" - GoMemoryUsedDescription = "Memory used by the Go runtime." - // GoProcessorLimit is the metric conforming to the "go.processor.limit" - // semantic conventions. It represents the number of OS threads that can - // execute user-level Go code simultaneously. - // Instrument: updowncounter - // Unit: {thread} - // Stability: development - GoProcessorLimitName = "go.processor.limit" - GoProcessorLimitUnit = "{thread}" - GoProcessorLimitDescription = "The number of OS threads that can execute user-level Go code simultaneously." - // GoScheduleDuration is the metric conforming to the "go.schedule.duration" - // semantic conventions. It represents the time goroutines have spent in the - // scheduler in a runnable state before actually running. - // Instrument: histogram - // Unit: s - // Stability: development - GoScheduleDurationName = "go.schedule.duration" - GoScheduleDurationUnit = "s" - GoScheduleDurationDescription = "The time goroutines have spent in the scheduler in a runnable state before actually running." - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: development - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: development - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. 
- // Instrument: histogram - // Unit: By - // Stability: development - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: development - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: development - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: development - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: development - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - // HwEnergy is the metric conforming to the "hw.energy" semantic conventions. - // It represents the energy consumed by the component. - // Instrument: counter - // Unit: J - // Stability: development - HwEnergyName = "hw.energy" - HwEnergyUnit = "J" - HwEnergyDescription = "Energy consumed by the component" - // HwErrors is the metric conforming to the "hw.errors" semantic conventions. - // It represents the number of errors encountered by the component. 
- // Instrument: counter - // Unit: {error} - // Stability: development - HwErrorsName = "hw.errors" - HwErrorsUnit = "{error}" - HwErrorsDescription = "Number of errors encountered by the component" - // HwPower is the metric conforming to the "hw.power" semantic conventions. It - // represents the instantaneous power consumed by the component. - // Instrument: gauge - // Unit: W - // Stability: development - HwPowerName = "hw.power" - HwPowerUnit = "W" - HwPowerDescription = "Instantaneous power consumed by the component" - // HwStatus is the metric conforming to the "hw.status" semantic conventions. - // It represents the operational status: `1` (true) or `0` (false) for each of - // the possible states. - // Instrument: updowncounter - // Unit: 1 - // Stability: development - HwStatusName = "hw.status" - HwStatusUnit = "1" - HwStatusDescription = "Operational status: `1` (true) or `0` (false) for each of the possible states" - // K8SCronJobActiveJobs is the metric conforming to the - // "k8s.cronjob.active_jobs" semantic conventions. It represents the number of - // actively running jobs for a cronjob. - // Instrument: updowncounter - // Unit: {job} - // Stability: development - K8SCronJobActiveJobsName = "k8s.cronjob.active_jobs" - K8SCronJobActiveJobsUnit = "{job}" - K8SCronJobActiveJobsDescription = "The number of actively running jobs for a cronjob" - // K8SDaemonSetCurrentScheduledNodes is the metric conforming to the - // "k8s.daemonset.current_scheduled_nodes" semantic conventions. It represents - // the number of nodes that are running at least 1 daemon pod and are supposed - // to run the daemon pod. - // Instrument: updowncounter - // Unit: {node} - // Stability: development - K8SDaemonSetCurrentScheduledNodesName = "k8s.daemonset.current_scheduled_nodes" - K8SDaemonSetCurrentScheduledNodesUnit = "{node}" - K8SDaemonSetCurrentScheduledNodesDescription = "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod" - // K8SDaemonSetDesiredScheduledNodes is the metric conforming to the - // "k8s.daemonset.desired_scheduled_nodes" semantic conventions. It represents - // the number of nodes that should be running the daemon pod (including nodes - // currently running the daemon pod). - // Instrument: updowncounter - // Unit: {node} - // Stability: development - K8SDaemonSetDesiredScheduledNodesName = "k8s.daemonset.desired_scheduled_nodes" - K8SDaemonSetDesiredScheduledNodesUnit = "{node}" - K8SDaemonSetDesiredScheduledNodesDescription = "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)" - // K8SDaemonSetMisscheduledNodes is the metric conforming to the - // "k8s.daemonset.misscheduled_nodes" semantic conventions. It represents the - // number of nodes that are running the daemon pod, but are not supposed to run - // the daemon pod. - // Instrument: updowncounter - // Unit: {node} - // Stability: development - K8SDaemonSetMisscheduledNodesName = "k8s.daemonset.misscheduled_nodes" - K8SDaemonSetMisscheduledNodesUnit = "{node}" - K8SDaemonSetMisscheduledNodesDescription = "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod" - // K8SDaemonSetReadyNodes is the metric conforming to the - // "k8s.daemonset.ready_nodes" semantic conventions. It represents the number - // of nodes that should be running the daemon pod and have one or more of the - // daemon pod running and ready. 
- // Instrument: updowncounter - // Unit: {node} - // Stability: development - K8SDaemonSetReadyNodesName = "k8s.daemonset.ready_nodes" - K8SDaemonSetReadyNodesUnit = "{node}" - K8SDaemonSetReadyNodesDescription = "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready" - // K8SDeploymentAvailablePods is the metric conforming to the - // "k8s.deployment.available_pods" semantic conventions. It represents the - // total number of available replica pods (ready for at least minReadySeconds) - // targeted by this deployment. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SDeploymentAvailablePodsName = "k8s.deployment.available_pods" - K8SDeploymentAvailablePodsUnit = "{pod}" - K8SDeploymentAvailablePodsDescription = "Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment" - // K8SDeploymentDesiredPods is the metric conforming to the - // "k8s.deployment.desired_pods" semantic conventions. It represents the number - // of desired replica pods in this deployment. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SDeploymentDesiredPodsName = "k8s.deployment.desired_pods" - K8SDeploymentDesiredPodsUnit = "{pod}" - K8SDeploymentDesiredPodsDescription = "Number of desired replica pods in this deployment" - // K8SHpaCurrentPods is the metric conforming to the "k8s.hpa.current_pods" - // semantic conventions. It represents the current number of replica pods - // managed by this horizontal pod autoscaler, as last seen by the autoscaler. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SHpaCurrentPodsName = "k8s.hpa.current_pods" - K8SHpaCurrentPodsUnit = "{pod}" - K8SHpaCurrentPodsDescription = "Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler" - // K8SHpaDesiredPods is the metric conforming to the "k8s.hpa.desired_pods" - // semantic conventions. It represents the desired number of replica pods - // managed by this horizontal pod autoscaler, as last calculated by the - // autoscaler. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SHpaDesiredPodsName = "k8s.hpa.desired_pods" - K8SHpaDesiredPodsUnit = "{pod}" - K8SHpaDesiredPodsDescription = "Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler" - // K8SHpaMaxPods is the metric conforming to the "k8s.hpa.max_pods" semantic - // conventions. It represents the upper limit for the number of replica pods to - // which the autoscaler can scale up. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SHpaMaxPodsName = "k8s.hpa.max_pods" - K8SHpaMaxPodsUnit = "{pod}" - K8SHpaMaxPodsDescription = "The upper limit for the number of replica pods to which the autoscaler can scale up" - // K8SHpaMinPods is the metric conforming to the "k8s.hpa.min_pods" semantic - // conventions. It represents the lower limit for the number of replica pods to - // which the autoscaler can scale down. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SHpaMinPodsName = "k8s.hpa.min_pods" - K8SHpaMinPodsUnit = "{pod}" - K8SHpaMinPodsDescription = "The lower limit for the number of replica pods to which the autoscaler can scale down" - // K8SJobActivePods is the metric conforming to the "k8s.job.active_pods" - // semantic conventions. 
It represents the number of pending and actively - // running pods for a job. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SJobActivePodsName = "k8s.job.active_pods" - K8SJobActivePodsUnit = "{pod}" - K8SJobActivePodsDescription = "The number of pending and actively running pods for a job" - // K8SJobDesiredSuccessfulPods is the metric conforming to the - // "k8s.job.desired_successful_pods" semantic conventions. It represents the - // desired number of successfully finished pods the job should be run with. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SJobDesiredSuccessfulPodsName = "k8s.job.desired_successful_pods" - K8SJobDesiredSuccessfulPodsUnit = "{pod}" - K8SJobDesiredSuccessfulPodsDescription = "The desired number of successfully finished pods the job should be run with" - // K8SJobFailedPods is the metric conforming to the "k8s.job.failed_pods" - // semantic conventions. It represents the number of pods which reached phase - // Failed for a job. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SJobFailedPodsName = "k8s.job.failed_pods" - K8SJobFailedPodsUnit = "{pod}" - K8SJobFailedPodsDescription = "The number of pods which reached phase Failed for a job" - // K8SJobMaxParallelPods is the metric conforming to the - // "k8s.job.max_parallel_pods" semantic conventions. It represents the max - // desired number of pods the job should run at any given time. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SJobMaxParallelPodsName = "k8s.job.max_parallel_pods" - K8SJobMaxParallelPodsUnit = "{pod}" - K8SJobMaxParallelPodsDescription = "The max desired number of pods the job should run at any given time" - // K8SJobSuccessfulPods is the metric conforming to the - // "k8s.job.successful_pods" semantic conventions. It represents the number of - // pods which reached phase Succeeded for a job. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SJobSuccessfulPodsName = "k8s.job.successful_pods" - K8SJobSuccessfulPodsUnit = "{pod}" - K8SJobSuccessfulPodsDescription = "The number of pods which reached phase Succeeded for a job" - // K8SNamespacePhase is the metric conforming to the "k8s.namespace.phase" - // semantic conventions. It represents the describes number of K8s namespaces - // that are currently in a given phase. - // Instrument: updowncounter - // Unit: {namespace} - // Stability: development - K8SNamespacePhaseName = "k8s.namespace.phase" - K8SNamespacePhaseUnit = "{namespace}" - K8SNamespacePhaseDescription = "Describes number of K8s namespaces that are currently in a given phase." - // K8SNodeCPUTime is the metric conforming to the "k8s.node.cpu.time" semantic - // conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: development - K8SNodeCPUTimeName = "k8s.node.cpu.time" - K8SNodeCPUTimeUnit = "s" - K8SNodeCPUTimeDescription = "Total CPU time consumed" - // K8SNodeCPUUsage is the metric conforming to the "k8s.node.cpu.usage" - // semantic conventions. It represents the node's CPU usage, measured in cpus. - // Range from 0 to the number of allocatable CPUs. - // Instrument: gauge - // Unit: {cpu} - // Stability: development - K8SNodeCPUUsageName = "k8s.node.cpu.usage" - K8SNodeCPUUsageUnit = "{cpu}" - K8SNodeCPUUsageDescription = "Node's CPU usage, measured in cpus. 
Range from 0 to the number of allocatable CPUs" - // K8SNodeMemoryUsage is the metric conforming to the "k8s.node.memory.usage" - // semantic conventions. It represents the memory usage of the Node. - // Instrument: gauge - // Unit: By - // Stability: development - K8SNodeMemoryUsageName = "k8s.node.memory.usage" - K8SNodeMemoryUsageUnit = "By" - K8SNodeMemoryUsageDescription = "Memory usage of the Node" - // K8SNodeNetworkErrors is the metric conforming to the - // "k8s.node.network.errors" semantic conventions. It represents the node - // network errors. - // Instrument: counter - // Unit: {error} - // Stability: development - K8SNodeNetworkErrorsName = "k8s.node.network.errors" - K8SNodeNetworkErrorsUnit = "{error}" - K8SNodeNetworkErrorsDescription = "Node network errors" - // K8SNodeNetworkIo is the metric conforming to the "k8s.node.network.io" - // semantic conventions. It represents the network bytes for the Node. - // Instrument: counter - // Unit: By - // Stability: development - K8SNodeNetworkIoName = "k8s.node.network.io" - K8SNodeNetworkIoUnit = "By" - K8SNodeNetworkIoDescription = "Network bytes for the Node" - // K8SNodeUptime is the metric conforming to the "k8s.node.uptime" semantic - // conventions. It represents the time the Node has been running. - // Instrument: gauge - // Unit: s - // Stability: development - K8SNodeUptimeName = "k8s.node.uptime" - K8SNodeUptimeUnit = "s" - K8SNodeUptimeDescription = "The time the Node has been running" - // K8SPodCPUTime is the metric conforming to the "k8s.pod.cpu.time" semantic - // conventions. It represents the total CPU time consumed. - // Instrument: counter - // Unit: s - // Stability: development - K8SPodCPUTimeName = "k8s.pod.cpu.time" - K8SPodCPUTimeUnit = "s" - K8SPodCPUTimeDescription = "Total CPU time consumed" - // K8SPodCPUUsage is the metric conforming to the "k8s.pod.cpu.usage" semantic - // conventions. It represents the pod's CPU usage, measured in cpus. Range from - // 0 to the number of allocatable CPUs. - // Instrument: gauge - // Unit: {cpu} - // Stability: development - K8SPodCPUUsageName = "k8s.pod.cpu.usage" - K8SPodCPUUsageUnit = "{cpu}" - K8SPodCPUUsageDescription = "Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs" - // K8SPodMemoryUsage is the metric conforming to the "k8s.pod.memory.usage" - // semantic conventions. It represents the memory usage of the Pod. - // Instrument: gauge - // Unit: By - // Stability: development - K8SPodMemoryUsageName = "k8s.pod.memory.usage" - K8SPodMemoryUsageUnit = "By" - K8SPodMemoryUsageDescription = "Memory usage of the Pod" - // K8SPodNetworkErrors is the metric conforming to the "k8s.pod.network.errors" - // semantic conventions. It represents the pod network errors. - // Instrument: counter - // Unit: {error} - // Stability: development - K8SPodNetworkErrorsName = "k8s.pod.network.errors" - K8SPodNetworkErrorsUnit = "{error}" - K8SPodNetworkErrorsDescription = "Pod network errors" - // K8SPodNetworkIo is the metric conforming to the "k8s.pod.network.io" - // semantic conventions. It represents the network bytes for the Pod. - // Instrument: counter - // Unit: By - // Stability: development - K8SPodNetworkIoName = "k8s.pod.network.io" - K8SPodNetworkIoUnit = "By" - K8SPodNetworkIoDescription = "Network bytes for the Pod" - // K8SPodUptime is the metric conforming to the "k8s.pod.uptime" semantic - // conventions. It represents the time the Pod has been running. 
- // Instrument: gauge - // Unit: s - // Stability: development - K8SPodUptimeName = "k8s.pod.uptime" - K8SPodUptimeUnit = "s" - K8SPodUptimeDescription = "The time the Pod has been running" - // K8SReplicaSetAvailablePods is the metric conforming to the - // "k8s.replicaset.available_pods" semantic conventions. It represents the - // total number of available replica pods (ready for at least minReadySeconds) - // targeted by this replicaset. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SReplicaSetAvailablePodsName = "k8s.replicaset.available_pods" - K8SReplicaSetAvailablePodsUnit = "{pod}" - K8SReplicaSetAvailablePodsDescription = "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset" - // K8SReplicaSetDesiredPods is the metric conforming to the - // "k8s.replicaset.desired_pods" semantic conventions. It represents the number - // of desired replica pods in this replicaset. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SReplicaSetDesiredPodsName = "k8s.replicaset.desired_pods" - K8SReplicaSetDesiredPodsUnit = "{pod}" - K8SReplicaSetDesiredPodsDescription = "Number of desired replica pods in this replicaset" - // K8SReplicationControllerAvailablePods is the metric conforming to the - // "k8s.replication_controller.available_pods" semantic conventions. It - // represents the total number of available replica pods (ready for at least - // minReadySeconds) targeted by this replication controller. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SReplicationControllerAvailablePodsName = "k8s.replication_controller.available_pods" - K8SReplicationControllerAvailablePodsUnit = "{pod}" - K8SReplicationControllerAvailablePodsDescription = "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller" - // K8SReplicationControllerDesiredPods is the metric conforming to the - // "k8s.replication_controller.desired_pods" semantic conventions. It - // represents the number of desired replica pods in this replication - // controller. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SReplicationControllerDesiredPodsName = "k8s.replication_controller.desired_pods" - K8SReplicationControllerDesiredPodsUnit = "{pod}" - K8SReplicationControllerDesiredPodsDescription = "Number of desired replica pods in this replication controller" - // K8SStatefulSetCurrentPods is the metric conforming to the - // "k8s.statefulset.current_pods" semantic conventions. It represents the - // number of replica pods created by the statefulset controller from the - // statefulset version indicated by currentRevision. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SStatefulSetCurrentPodsName = "k8s.statefulset.current_pods" - K8SStatefulSetCurrentPodsUnit = "{pod}" - K8SStatefulSetCurrentPodsDescription = "The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision" - // K8SStatefulSetDesiredPods is the metric conforming to the - // "k8s.statefulset.desired_pods" semantic conventions. It represents the - // number of desired replica pods in this statefulset. 
- // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SStatefulSetDesiredPodsName = "k8s.statefulset.desired_pods" - K8SStatefulSetDesiredPodsUnit = "{pod}" - K8SStatefulSetDesiredPodsDescription = "Number of desired replica pods in this statefulset" - // K8SStatefulSetReadyPods is the metric conforming to the - // "k8s.statefulset.ready_pods" semantic conventions. It represents the number - // of replica pods created for this statefulset with a Ready Condition. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SStatefulSetReadyPodsName = "k8s.statefulset.ready_pods" - K8SStatefulSetReadyPodsUnit = "{pod}" - K8SStatefulSetReadyPodsDescription = "The number of replica pods created for this statefulset with a Ready Condition" - // K8SStatefulSetUpdatedPods is the metric conforming to the - // "k8s.statefulset.updated_pods" semantic conventions. It represents the - // number of replica pods created by the statefulset controller from the - // statefulset version indicated by updateRevision. - // Instrument: updowncounter - // Unit: {pod} - // Stability: development - K8SStatefulSetUpdatedPodsName = "k8s.statefulset.updated_pods" - K8SStatefulSetUpdatedPodsUnit = "{pod}" - K8SStatefulSetUpdatedPodsDescription = "Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision" - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: stable - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: stable - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: stable - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: stable - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. 
It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: stable - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: stable - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: stable - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). . - // Instrument: updowncounter - // Unit: {connection} - // Stability: stable - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - // MessagingClientConsumedMessages is the metric conforming to the - // "messaging.client.consumed.messages" semantic conventions. It represents the - // number of messages that were delivered to the application. - // Instrument: counter - // Unit: {message} - // Stability: development - MessagingClientConsumedMessagesName = "messaging.client.consumed.messages" - MessagingClientConsumedMessagesUnit = "{message}" - MessagingClientConsumedMessagesDescription = "Number of messages that were delivered to the application." - // MessagingClientOperationDuration is the metric conforming to the - // "messaging.client.operation.duration" semantic conventions. It represents - // the duration of messaging operation initiated by a producer or consumer - // client. - // Instrument: histogram - // Unit: s - // Stability: development - MessagingClientOperationDurationName = "messaging.client.operation.duration" - MessagingClientOperationDurationUnit = "s" - MessagingClientOperationDurationDescription = "Duration of messaging operation initiated by a producer or consumer client." - // MessagingClientPublishedMessages is the metric conforming to the - // "messaging.client.published.messages" semantic conventions. It represents - // the deprecated. Use `messaging.client.sent.messages` instead. - // Instrument: counter - // Unit: {message} - // Stability: development - // Deprecated: Replaced by `messaging.client.sent.messages`. - MessagingClientPublishedMessagesName = "messaging.client.published.messages" - MessagingClientPublishedMessagesUnit = "{message}" - MessagingClientPublishedMessagesDescription = "Deprecated. 
Use `messaging.client.sent.messages` instead." - // MessagingClientSentMessages is the metric conforming to the - // "messaging.client.sent.messages" semantic conventions. It represents the - // number of messages producer attempted to send to the broker. - // Instrument: counter - // Unit: {message} - // Stability: development - MessagingClientSentMessagesName = "messaging.client.sent.messages" - MessagingClientSentMessagesUnit = "{message}" - MessagingClientSentMessagesDescription = "Number of messages producer attempted to send to the broker." - // MessagingProcessDuration is the metric conforming to the - // "messaging.process.duration" semantic conventions. It represents the - // duration of processing operation. - // Instrument: histogram - // Unit: s - // Stability: development - MessagingProcessDurationName = "messaging.process.duration" - MessagingProcessDurationUnit = "s" - MessagingProcessDurationDescription = "Duration of processing operation." - // MessagingProcessMessages is the metric conforming to the - // "messaging.process.messages" semantic conventions. It represents the - // deprecated. Use `messaging.client.consumed.messages` instead. - // Instrument: counter - // Unit: {message} - // Stability: development - // Deprecated: Replaced by `messaging.client.consumed.messages`. - MessagingProcessMessagesName = "messaging.process.messages" - MessagingProcessMessagesUnit = "{message}" - MessagingProcessMessagesDescription = "Deprecated. Use `messaging.client.consumed.messages` instead." - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // deprecated. Use `messaging.client.operation.duration` instead. - // Instrument: histogram - // Unit: s - // Stability: development - // Deprecated: Replaced by `messaging.client.operation.duration`. - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Deprecated. Use `messaging.client.operation.duration` instead." - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // deprecated. Use `messaging.client.produced.messages` instead. - // Instrument: counter - // Unit: {message} - // Stability: development - // Deprecated: Replaced by `messaging.client.produced.messages`. - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Deprecated. Use `messaging.client.produced.messages` instead." - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // deprecated. Use `messaging.client.operation.duration` instead. - // Instrument: histogram - // Unit: s - // Stability: development - // Deprecated: Replaced by `messaging.client.operation.duration`. - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Deprecated. Use `messaging.client.operation.duration` instead." - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // deprecated. Use `messaging.client.consumed.messages` instead. - // Instrument: counter - // Unit: {message} - // Stability: development - // Deprecated: Replaced by `messaging.client.consumed.messages`. 
- MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Deprecated. Use `messaging.client.consumed.messages` instead." - // ProcessContextSwitches is the metric conforming to the - // "process.context_switches" semantic conventions. It represents the number of - // times the process has been context switched. - // Instrument: counter - // Unit: {count} - // Stability: development - ProcessContextSwitchesName = "process.context_switches" - ProcessContextSwitchesUnit = "{count}" - ProcessContextSwitchesDescription = "Number of times the process has been context switched." - // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic - // conventions. It represents the total CPU seconds broken down by different - // states. - // Instrument: counter - // Unit: s - // Stability: development - ProcessCPUTimeName = "process.cpu.time" - ProcessCPUTimeUnit = "s" - ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." - // ProcessCPUUtilization is the metric conforming to the - // "process.cpu.utilization" semantic conventions. It represents the difference - // in process.cpu.time since the last measurement, divided by the elapsed time - // and number of CPUs available to the process. - // Instrument: gauge - // Unit: 1 - // Stability: development - ProcessCPUUtilizationName = "process.cpu.utilization" - ProcessCPUUtilizationUnit = "1" - ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." - // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic - // conventions. It represents the disk bytes transferred. - // Instrument: counter - // Unit: By - // Stability: development - ProcessDiskIoName = "process.disk.io" - ProcessDiskIoUnit = "By" - ProcessDiskIoDescription = "Disk bytes transferred." - // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" - // semantic conventions. It represents the amount of physical memory in use. - // Instrument: updowncounter - // Unit: By - // Stability: development - ProcessMemoryUsageName = "process.memory.usage" - ProcessMemoryUsageUnit = "By" - ProcessMemoryUsageDescription = "The amount of physical memory in use." - // ProcessMemoryVirtual is the metric conforming to the - // "process.memory.virtual" semantic conventions. It represents the amount of - // committed virtual memory. - // Instrument: updowncounter - // Unit: By - // Stability: development - ProcessMemoryVirtualName = "process.memory.virtual" - ProcessMemoryVirtualUnit = "By" - ProcessMemoryVirtualDescription = "The amount of committed virtual memory." - // ProcessNetworkIo is the metric conforming to the "process.network.io" - // semantic conventions. It represents the network bytes transferred. - // Instrument: counter - // Unit: By - // Stability: development - ProcessNetworkIoName = "process.network.io" - ProcessNetworkIoUnit = "By" - ProcessNetworkIoDescription = "Network bytes transferred." - // ProcessOpenFileDescriptorCount is the metric conforming to the - // "process.open_file_descriptor.count" semantic conventions. It represents the - // number of file descriptors in use by the process. 
- // Instrument: updowncounter - // Unit: {count} - // Stability: development - ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" - ProcessOpenFileDescriptorCountUnit = "{count}" - ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." - // ProcessPagingFaults is the metric conforming to the "process.paging.faults" - // semantic conventions. It represents the number of page faults the process - // has made. - // Instrument: counter - // Unit: {fault} - // Stability: development - ProcessPagingFaultsName = "process.paging.faults" - ProcessPagingFaultsUnit = "{fault}" - ProcessPagingFaultsDescription = "Number of page faults the process has made." - // ProcessThreadCount is the metric conforming to the "process.thread.count" - // semantic conventions. It represents the process threads count. - // Instrument: updowncounter - // Unit: {thread} - // Stability: development - ProcessThreadCountName = "process.thread.count" - ProcessThreadCountUnit = "{thread}" - ProcessThreadCountDescription = "Process threads count." - // ProcessUptime is the metric conforming to the "process.uptime" semantic - // conventions. It represents the time the process has been running. - // Instrument: gauge - // Unit: s - // Stability: development - ProcessUptimeName = "process.uptime" - ProcessUptimeUnit = "s" - ProcessUptimeDescription = "The time the process has been running." - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: development - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: development - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: development - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: development - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. 
- // Instrument: histogram - // Unit: {count} - // Stability: development - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: development - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: development - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: development - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: development - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: development - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: stable - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: stable - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." 
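The histogram metrics in this block follow the same pattern; note that rpc.server.duration is declared in milliseconds, so recorded values must be converted to that unit. A hedged sketch under the same assumptions as above (illustrative import path and meter name):

package example

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// recordRPCServerDuration records one inbound RPC latency on the
// rpc.server.duration histogram. The declared unit is "ms", so the elapsed
// time is converted to milliseconds before recording.
func recordRPCServerDuration(ctx context.Context, elapsed time.Duration) error {
	meter := otel.Meter("example/rpc") // illustrative instrumentation scope
	dur, err := meter.Float64Histogram(
		semconv.RPCServerDurationName,
		metric.WithUnit(semconv.RPCServerDurationUnit),
		metric.WithDescription(semconv.RPCServerDurationDescription),
	)
	if err != nil {
		return err
	}
	dur.Record(ctx, float64(elapsed)/float64(time.Millisecond))
	return nil
}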
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: development - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: development - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: development - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: development - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: development - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: development - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - // SystemDiskLimit is the metric conforming to the "system.disk.limit" semantic - // conventions. It represents the total storage capacity of the disk. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemDiskLimitName = "system.disk.limit" - SystemDiskLimitUnit = "By" - SystemDiskLimitDescription = "The total storage capacity of the disk" - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. 
- // Instrument: counter - // Unit: {operation} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. - // Instrument: counter - // Unit: s - // Stability: development - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - // SystemFilesystemLimit is the metric conforming to the - // "system.filesystem.limit" semantic conventions. It represents the total - // storage capacity of the filesystem. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemFilesystemLimitName = "system.filesystem.limit" - SystemFilesystemLimitUnit = "By" - SystemFilesystemLimitDescription = "The total storage capacity of the filesystem" - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. It represents the reports a - // filesystem's space usage across different states. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - SystemFilesystemUsageDescription = "Reports a filesystem's space usage across different states." - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" - // SystemLinuxMemorySlabUsage is the metric conforming to the - // "system.linux.memory.slab.usage" semantic conventions. It represents the - // reports the memory used by the Linux kernel for managing caches of - // frequently used objects. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemLinuxMemorySlabUsageName = "system.linux.memory.slab.usage" - SystemLinuxMemorySlabUsageUnit = "By" - SystemLinuxMemorySlabUsageDescription = "Reports the memory used by the Linux kernel for managing caches of frequently used objects." 
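Gauge-style system metrics such as system.cpu.utilization (listed earlier in this hunk) are usually wired up as observable instruments with a callback rather than recorded synchronously. A minimal sketch, again assuming the constants are exported by the vendored semconv version; readCPUUtilization is a hypothetical helper, not part of this codebase:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// observeCPUUtilization registers an observable gauge for system.cpu.utilization.
// readCPUUtilization is a hypothetical helper returning a value in [0, 1].
func observeCPUUtilization(readCPUUtilization func() float64) (metric.Registration, error) {
	meter := otel.Meter("example/system") // illustrative instrumentation scope
	util, err := meter.Float64ObservableGauge(
		semconv.SystemCPUUtilizationName,
		metric.WithUnit(semconv.SystemCPUUtilizationUnit),
		metric.WithDescription(semconv.SystemCPUUtilizationDescription),
	)
	if err != nil {
		return nil, err
	}
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveFloat64(util, readCPUUtilization())
		return nil
	}, util)
}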
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." - // SystemMemoryShared is the metric conforming to the "system.memory.shared" - // semantic conventions. It represents the shared memory used (mostly by - // tmpfs). - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemMemorySharedName = "system.memory.shared" - SystemMemorySharedUnit = "By" - SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: development - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: development - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: development - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: development - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - // SystemProcessCount is the metric conforming to the "system.process.count" - // semantic conventions. It represents the total number of processes in each - // state. - // Instrument: updowncounter - // Unit: {process} - // Stability: development - SystemProcessCountName = "system.process.count" - SystemProcessCountUnit = "{process}" - SystemProcessCountDescription = "Total number of processes in each state" - // SystemProcessCreated is the metric conforming to the - // "system.process.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: development - SystemProcessCreatedName = "system.process.created" - SystemProcessCreatedUnit = "{process}" - SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" - // SystemUptime is the metric conforming to the "system.uptime" semantic - // conventions. It represents the time the system has been running. - // Instrument: gauge - // Unit: s - // Stability: development - SystemUptimeName = "system.uptime" - SystemUptimeUnit = "s" - SystemUptimeDescription = "The time the system has been running" - // VCSChangeCount is the metric conforming to the "vcs.change.count" semantic - // conventions. It represents the number of changes (pull requests/merge - // requests/changelists) in a repository, categorized by their state (e.g. open - // or merged). - // Instrument: updowncounter - // Unit: {change} - // Stability: development - VCSChangeCountName = "vcs.change.count" - VCSChangeCountUnit = "{change}" - VCSChangeCountDescription = "The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)" - // VCSChangeDuration is the metric conforming to the "vcs.change.duration" - // semantic conventions. 
It represents the time duration a change (pull - // request/merge request/changelist) has been in a given state. - // Instrument: gauge - // Unit: s - // Stability: development - VCSChangeDurationName = "vcs.change.duration" - VCSChangeDurationUnit = "s" - VCSChangeDurationDescription = "The time duration a change (pull request/merge request/changelist) has been in a given state." - // VCSChangeTimeToApproval is the metric conforming to the - // "vcs.change.time_to_approval" semantic conventions. It represents the amount - // of time since its creation it took a change (pull request/merge - // request/changelist) to get the first approval. - // Instrument: gauge - // Unit: s - // Stability: development - VCSChangeTimeToApprovalName = "vcs.change.time_to_approval" - VCSChangeTimeToApprovalUnit = "s" - VCSChangeTimeToApprovalDescription = "The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval." - // VCSChangeTimeToMerge is the metric conforming to the - // "vcs.change.time_to_merge" semantic conventions. It represents the amount of - // time since its creation it took a change (pull request/merge - // request/changelist) to get merged into the target(base) ref. - // Instrument: gauge - // Unit: s - // Stability: development - VCSChangeTimeToMergeName = "vcs.change.time_to_merge" - VCSChangeTimeToMergeUnit = "s" - VCSChangeTimeToMergeDescription = "The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref." - // VCSContributorCount is the metric conforming to the "vcs.contributor.count" - // semantic conventions. It represents the number of unique contributors to a - // repository. - // Instrument: gauge - // Unit: {contributor} - // Stability: development - VCSContributorCountName = "vcs.contributor.count" - VCSContributorCountUnit = "{contributor}" - VCSContributorCountDescription = "The number of unique contributors to a repository" - // VCSRefCount is the metric conforming to the "vcs.ref.count" semantic - // conventions. It represents the number of refs of type branch or tag in a - // repository. - // Instrument: updowncounter - // Unit: {ref} - // Stability: development - VCSRefCountName = "vcs.ref.count" - VCSRefCountUnit = "{ref}" - VCSRefCountDescription = "The number of refs of type branch or tag in a repository." - // VCSRefLinesDelta is the metric conforming to the "vcs.ref.lines_delta" - // semantic conventions. It represents the number of lines added/removed in a - // ref (branch) relative to the ref from the `vcs.ref.base.name` attribute. - // Instrument: gauge - // Unit: {line} - // Stability: development - VCSRefLinesDeltaName = "vcs.ref.lines_delta" - VCSRefLinesDeltaUnit = "{line}" - VCSRefLinesDeltaDescription = "The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute." - // VCSRefRevisionsDelta is the metric conforming to the - // "vcs.ref.revisions_delta" semantic conventions. It represents the number of - // revisions (commits) a ref (branch) is ahead/behind the branch from the - // `vcs.ref.base.name` attribute. 
- // Instrument: gauge - // Unit: {revision} - // Stability: development - VCSRefRevisionsDeltaName = "vcs.ref.revisions_delta" - VCSRefRevisionsDeltaUnit = "{revision}" - VCSRefRevisionsDeltaDescription = "The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute" - // VCSRefTime is the metric conforming to the "vcs.ref.time" semantic - // conventions. It represents the time a ref (branch) created from the default - // branch (trunk) has existed. The `ref.type` attribute will always be `branch` - // . - // Instrument: gauge - // Unit: s - // Stability: development - VCSRefTimeName = "vcs.ref.time" - VCSRefTimeUnit = "s" - VCSRefTimeDescription = "Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`" - // VCSRepositoryCount is the metric conforming to the "vcs.repository.count" - // semantic conventions. It represents the number of repositories in an - // organization. - // Instrument: updowncounter - // Unit: {repository} - // Stability: development - VCSRepositoryCountName = "vcs.repository.count" - VCSRepositoryCountUnit = "{repository}" - VCSRepositoryCountDescription = "The number of repositories in an organization." -) \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/schema.go deleted file mode 100644 index b2e7a515..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.30.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md deleted file mode 100644 index 02b56115..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md +++ /dev/null @@ -1,4 +0,0 @@ - -# Migration from v1.33.0 to v1.34.0 - -The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md deleted file mode 100644 index fab06c97..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.34.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.34.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go deleted file mode 100644 index 98c0fdda..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go +++ /dev/null @@ -1,14061 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. 
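The SchemaURL constant removed with schema.go above is what applications pass when building a Resource, so exported telemetry declares which semantic-convention schema its attribute names follow. A minimal sketch, assuming the v1.34.0 package deleted in this diff (any semconv version exposes the same constant); the service name value is an assumption.

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// newResource pairs the generated SchemaURL with service attributes.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,                 // "https://opentelemetry.io/schemas/1.34.0"
		semconv.ServiceName("my-service"), // value is an assumption
	)
}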
- -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" - -import "go.opentelemetry.io/otel/attribute" - -// Namespace: android -const ( - // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "created" - // Note: The Android lifecycle states are defined in - // [Activity lifecycle callbacks], and from which the `OS identifiers` are - // derived. - // - // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc - AndroidAppStateKey = attribute.Key("android.app.state") - - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version (`os.version`) of - // the android operating system. More information can be found [here]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "33", "32" - // - // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found [here]. -// -// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// Enum values for android.app.state -var ( - // Any time before Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called in the app for the first time. - // - // Stability: development - AndroidAppStateCreated = AndroidAppStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, - // Context.stopService() has been called when the app was in the foreground - // state. - // - // Stability: development - AndroidAppStateBackground = AndroidAppStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, - // Context.startService() has been called when the app was in either the created - // or background states. - // - // Stability: development - AndroidAppStateForeground = AndroidAppStateKey.String("foreground") -) - -// Namespace: app -const ( - // AppInstallationIDKey is the attribute Key conforming to the - // "app.installation.id" semantic conventions. It represents a unique identifier - // representing the installation of an application on a specific device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" - // Note: Its value SHOULD persist across launches of the same application - // installation, including through application upgrades. - // It SHOULD change if the application is uninstalled or if all applications of - // the vendor are uninstalled. - // Additionally, users might be able to reset this value (e.g. by clearing - // application data). - // If an app is installed multiple times on the same device (e.g. 
in different - // accounts on Android), each `app.installation.id` SHOULD have a different - // value. - // If multiple OpenTelemetry SDKs are used within the same application, they - // SHOULD use the same value for `app.installation.id`. - // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the - // `app.installation.id`. - // - // For iOS, this value SHOULD be equal to the [vendor identifier]. - // - // For Android, examples of `app.installation.id` implementations include: - // - // - [Firebase Installation ID]. - // - A globally unique UUID which is persisted across sessions in your - // application. - // - [App set ID]. - // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. - // - // More information about Android identifier best practices can be found [here] - // . - // - // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor - // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations - // [App set ID]: https://developer.android.com/identity/app-set-id - // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID - // [here]: https://developer.android.com/training/articles/user-data-ids - AppInstallationIDKey = attribute.Key("app.installation.id") - - // AppScreenCoordinateXKey is the attribute Key conforming to the - // "app.screen.coordinate.x" semantic conventions. It represents the x - // (horizontal) coordinate of a screen coordinate, in screen pixels. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 131 - AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") - - // AppScreenCoordinateYKey is the attribute Key conforming to the - // "app.screen.coordinate.y" semantic conventions. It represents the y - // (vertical) component of a screen coordinate, in screen pixels. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12, 99 - AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") - - // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" - // semantic conventions. It represents an identifier that uniquely - // differentiates this widget from other widgets in the same application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetIDKey = attribute.Key("app.widget.id") - - // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" - // semantic conventions. It represents the name of an application widget. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "submit", "attack", "Clear Cart" - // Note: A widget is an application component, typically an on-screen visual GUI - // element. - AppWidgetNameKey = attribute.Key("app.widget.name") -) - -// AppInstallationID returns an attribute KeyValue conforming to the -// "app.installation.id" semantic conventions. It represents a unique identifier -// representing the installation of an application on a specific device. 
-func AppInstallationID(val string) attribute.KeyValue { - return AppInstallationIDKey.String(val) -} - -// AppScreenCoordinateX returns an attribute KeyValue conforming to the -// "app.screen.coordinate.x" semantic conventions. It represents the x -// (horizontal) coordinate of a screen coordinate, in screen pixels. -func AppScreenCoordinateX(val int) attribute.KeyValue { - return AppScreenCoordinateXKey.Int(val) -} - -// AppScreenCoordinateY returns an attribute KeyValue conforming to the -// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) -// component of a screen coordinate, in screen pixels. -func AppScreenCoordinateY(val int) attribute.KeyValue { - return AppScreenCoordinateYKey.Int(val) -} - -// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" -// semantic conventions. It represents an identifier that uniquely differentiates -// this widget from other widgets in the same application. -func AppWidgetID(val string) attribute.KeyValue { - return AppWidgetIDKey.String(val) -} - -// AppWidgetName returns an attribute KeyValue conforming to the -// "app.widget.name" semantic conventions. It represents the name of an -// application widget. -func AppWidgetName(val string) attribute.KeyValue { - return AppWidgetNameKey.String(val) -} - -// Namespace: artifact -const ( - // ArtifactAttestationFilenameKey is the attribute Key conforming to the - // "artifact.attestation.filename" semantic conventions. It represents the - // provenance filename of the built attestation which directly relates to the - // build artifact filename. This filename SHOULD accompany the artifact at - // publish time. See the [SLSA Relationship] specification for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0.attestation", - // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", - // "file-name-package.tar.gz.intoto.json1" - // - // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations - ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") - - // ArtifactAttestationHashKey is the attribute Key conforming to the - // "artifact.attestation.hash" semantic conventions. It represents the full - // [hash value (see glossary)], of the built attestation. Some envelopes in the - // [software attestation space] also refer to this as the **digest**. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec - ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") - - // ArtifactAttestationIDKey is the attribute Key conforming to the - // "artifact.attestation.id" semantic conventions. It represents the id of the - // build [software attestation]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - // - // [software attestation]: https://slsa.dev/attestation-model - ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") - - // ArtifactFilenameKey is the attribute Key conforming to the - // "artifact.filename" semantic conventions. 
It represents the human readable - // file name of the artifact, typically generated during build and release - // processes. Often includes the package name and version in the file name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", - // "release-1.tar.gz", "file-name-package.tar.gz" - // Note: This file name can also act as the [Package Name] - // in cases where the package ecosystem maps accordingly. - // Additionally, the artifact [can be published] - // for others, but that is not a guarantee. - // - // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model - // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain - ArtifactFilenameKey = attribute.Key("artifact.filename") - - // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" - // semantic conventions. It represents the full [hash value (see glossary)], - // often found in checksum.txt on a release of the artifact and used to verify - // package integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" - // Note: The specific algorithm used to create the cryptographic hash value is - // not defined. In situations where an artifact has multiple - // cryptographic hashes, it is up to the implementer to choose which - // hash value to set here; this should be the most secure hash algorithm - // that is suitable for the situation and consistent with the - // corresponding attestation. The implementer can then provide the other - // hash values through an additional set of attribute extensions as they - // deem necessary. - // - // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - ArtifactHashKey = attribute.Key("artifact.hash") - - // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" - // semantic conventions. It represents the [Package URL] of the - // [package artifact] provides a standard way to identify and locate the - // packaged artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pkg:github/package-url/purl-spec@1209109710924", - // "pkg:npm/foo@12.12.3" - // - // [Package URL]: https://github.com/package-url/purl-spec - // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model - ArtifactPurlKey = attribute.Key("artifact.purl") - - // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" - // semantic conventions. It represents the version of the artifact. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v0.1.0", "1.2.1", "122691-build" - ArtifactVersionKey = attribute.Key("artifact.version") -) - -// ArtifactAttestationFilename returns an attribute KeyValue conforming to the -// "artifact.attestation.filename" semantic conventions. It represents the -// provenance filename of the built attestation which directly relates to the -// build artifact filename. This filename SHOULD accompany the artifact at -// publish time. See the [SLSA Relationship] specification for more information. 
-// -// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations -func ArtifactAttestationFilename(val string) attribute.KeyValue { - return ArtifactAttestationFilenameKey.String(val) -} - -// ArtifactAttestationHash returns an attribute KeyValue conforming to the -// "artifact.attestation.hash" semantic conventions. It represents the full -// [hash value (see glossary)], of the built attestation. Some envelopes in the -// [software attestation space] also refer to this as the **digest**. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec -func ArtifactAttestationHash(val string) attribute.KeyValue { - return ArtifactAttestationHashKey.String(val) -} - -// ArtifactAttestationID returns an attribute KeyValue conforming to the -// "artifact.attestation.id" semantic conventions. It represents the id of the -// build [software attestation]. -// -// [software attestation]: https://slsa.dev/attestation-model -func ArtifactAttestationID(val string) attribute.KeyValue { - return ArtifactAttestationIDKey.String(val) -} - -// ArtifactFilename returns an attribute KeyValue conforming to the -// "artifact.filename" semantic conventions. It represents the human readable -// file name of the artifact, typically generated during build and release -// processes. Often includes the package name and version in the file name. -func ArtifactFilename(val string) attribute.KeyValue { - return ArtifactFilenameKey.String(val) -} - -// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" -// semantic conventions. It represents the full [hash value (see glossary)], -// often found in checksum.txt on a release of the artifact and used to verify -// package integrity. -// -// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf -func ArtifactHash(val string) attribute.KeyValue { - return ArtifactHashKey.String(val) -} - -// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" -// semantic conventions. It represents the [Package URL] of the -// [package artifact] provides a standard way to identify and locate the packaged -// artifact. -// -// [Package URL]: https://github.com/package-url/purl-spec -// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model -func ArtifactPurl(val string) attribute.KeyValue { - return ArtifactPurlKey.String(val) -} - -// ArtifactVersion returns an attribute KeyValue conforming to the -// "artifact.version" semantic conventions. It represents the version of the -// artifact. -func ArtifactVersion(val string) attribute.KeyValue { - return ArtifactVersionKey.String(val) -} - -// Namespace: aws -const ( - // AWSBedrockGuardrailIDKey is the attribute Key conforming to the - // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique - // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and - // prevent unwanted behavior from model responses or user messages. 
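The helper functions above (ArtifactAttestationFilename through ArtifactVersion) all follow the same shape: take a plain Go value and return an attribute.KeyValue built from the matching Key constant, ready to hand to the tracing API. A minimal sketch, assuming the v1.34.0 import path deleted in this diff; tracer name, span name, and attribute values are illustrative (the values echo the Examples in the doc comments).

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
	"go.opentelemetry.io/otel/trace"
)

// publishArtifact starts a span annotated with artifact.* attributes built by
// the generated helpers.
func publishArtifact(ctx context.Context) {
	_, span := otel.Tracer("example/release").Start(ctx, "publish-artifact",
		trace.WithAttributes(
			semconv.ArtifactFilename("release-1.tar.gz"), // example value
			semconv.ArtifactVersion("v0.1.0"),            // example value
		),
	)
	defer span.End()
}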
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "sgi5gkybzqak" - // - // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html - AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") - - // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the - // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the - // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a - // bank of information that can be queried by models to generate more relevant - // responses and augment prompts. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "XFWUPB9PAW" - // - // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html - AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") - - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the - // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the - // JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lives", "id" - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the value - // of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }" - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of the - // `Count` response parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the - // value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "CatsTable" - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }" - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` - // request field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value of - // the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "name_to_group" - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the - // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents - // the JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of the - // `Limit` request parameter. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the - // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents - // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }" - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value of - // the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, - // ProductReviews" - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents - // the value of the `ProvisionedThroughput.WriteCapacityUnits` request - // parameter. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of - // the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of - // the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of the - // `Segment` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. 
It represents the value of the - // `Select` request parameter. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ALL_ATTRIBUTES", "COUNT" - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the number of - // items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys in - // the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Users", "Cats" - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the value - // of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS cluster]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - // - // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container instance]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" - // - // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" - // semantic conventions. It represents the ARN of a running [ECS task]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", - // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - // - // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the family name of - // the [ECS task definition] used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-family" - // - // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" - // semantic conventions. It represents the ID of a running ECS task. The ID MUST - // be extracted from `task.arn`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", - // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" - AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision for - // the task definition used to create the ECS task. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "8", "26" - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") - - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS - // cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") - - // AWSExtendedRequestIDKey is the attribute Key conforming to the - // "aws.extended_request_id" semantic conventions. It represents the AWS - // extended request ID as returned in the response header `x-amz-id-2`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" - AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") - - // AWSKinesisStreamNameKey is the attribute Key conforming to the - // "aws.kinesis.stream_name" semantic conventions. It represents the name of the - // AWS Kinesis [stream] the request refers to. Corresponds to the - // `--stream-name` parameter of the Kinesis [describe-stream] operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-stream-name" - // - // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html - // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html - AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") - - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full invoked - // ARN as provided on the `Context` passed to the function ( - // `Lambda-Runtime-Invoked-Function-Arn` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" - // Note: This may be different from `cloud.resource_id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") - - // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the - // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID - // of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda - // function. It's contents are read by Lambda and used to trigger a function. - // This isn't available in the lambda execution context or the lambda runtime - // environtment. This is going to be populated by the AWS SDK for each language - // when that UUID is present. Some of these operations are - // Create/Delete/Get/List/Update EventSourceMapping. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" - // - // [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html - AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") - - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource - // Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" - // Note: See the [log group ARN format documentation]. - // - // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of the - // AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/aws/lambda/my-function", "opentelemetry-service" - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each - // write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the - // AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - // Note: See the [log stream ARN format documentation]. One log group can - // contain several log streams, so these ARNs necessarily identify both a log - // group and a log stream. 
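When no typed helper fits, the Key constants themselves (for example AWSLambdaInvokedARNKey or AWSLogGroupNamesKey) can be used directly through the attribute.Key methods such as String and StringSlice. A minimal sketch of enriching the active span inside a Lambda handler, assuming the v1.34.0 import path deleted in this diff; the ARN and log-group values echo the Examples above.

package main

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
	"go.opentelemetry.io/otel/trace"
)

// annotateLambdaSpan sets aws.lambda.* and aws.log.* attributes on the span
// already carried in ctx.
func annotateLambdaSpan(ctx context.Context) {
	span := trace.SpanFromContext(ctx)
	span.SetAttributes(
		semconv.AWSLambdaInvokedARNKey.String("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
		semconv.AWSLogGroupNamesKey.StringSlice([]string{"/aws/lambda/my-function"}),
	)
}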
- // - // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) of the - // AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in the - // response headers `x-amzn-requestid`, `x-amzn-request-id` or - // `x-amz-request-id`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" - AWSRequestIDKey = attribute.Key("aws.request_id") - - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request refers to. - // Corresponds to the `--bucket` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "some-bucket-name" - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source object - // (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 API]. - // This applies in particular to the following operations: - // - // - [copy-object] - // - [upload-part-copy] - // - // - // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" - // Note: The `delete` attribute is only applicable to the [delete-object] - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 API]. 
- // - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 API] operations. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "someFile.yml" - // Note: The `key` attribute is applicable to all object-related S3 operations, - // i.e. that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - [copy-object] - // - [delete-object] - // - [get-object] - // - [head-object] - // - [put-object] - // - [restore-object] - // - [select-object-content] - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [create-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html - // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html - // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html - // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html - // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html - // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html - // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number of - // the part being uploaded in a multipart-upload operation. This is a positive - // integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the [upload-part] - // and [upload-part-copy] operations. - // The `part_number` attribute corresponds to the `--part-number` parameter of - // the - // [upload-part operation within the S3 API]. 
- // - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" - // semantic conventions. It represents the upload ID that identifies the - // multipart upload. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" - // Note: The `upload_id` attribute applies to S3 multipart-upload operations and - // corresponds to the `--upload-id` parameter - // of the [S3 API] multipart operations. - // This applies in particular to the following operations: - // - // - [abort-multipart-upload] - // - [complete-multipart-upload] - // - [list-parts] - // - [upload-part] - // - [upload-part-copy] - // - // - // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html - // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html - // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html - // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html - // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html - // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") - - // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the - // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN - // of the Secret stored in the Secrets Mangger. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters" - AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn") - - // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn" - // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon - // SNS [topic] is a logical access point that acts as a communication channel. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" - // - // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html - AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") - - // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" - // semantic conventions. It represents the URL of the AWS SQS Queue. It's a - // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is - // used to access the queue and perform actions on it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" - AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") - - // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the - // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN - // of the AWS Step Functions Activity. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" - AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") - - // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the - // "aws.step_functions.state_machine.arn" semantic conventions. It represents - // the ARN of the AWS Step Functions State Machine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" - AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") -) - -// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the -// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and -// prevent unwanted behavior from model responses or user messages. -// -// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html -func AWSBedrockGuardrailID(val string) attribute.KeyValue { - return AWSBedrockGuardrailIDKey.String(val) -} - -// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the -// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique -// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of -// information that can be queried by models to generate more relevant responses -// and augment prompts. -// -// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html -func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { - return AWSBedrockKnowledgeBaseIDKey.String(val) -} - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to -// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents -// the JSON-serialized value of each item in the `AttributeDefinitions` request -// field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the -// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value -// of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the -// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. 
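// Editor's sketch (illustrative only, not part of the vendored file being
// removed above): the DynamoDB helpers in this block are plain
// attribute.KeyValue constructors, so they are normally passed straight to
// span.SetAttributes on the span wrapping the AWS SDK call. The tracer name,
// span name, and the table/limit values below are assumptions, as is the
// exact semconv import path version.
package awsdynamodbsketch

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

// recordDynamoDBQuery annotates a span for a hypothetical Query call.
func recordDynamoDBQuery(ctx context.Context, table string, limit int) {
	_, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
	defer span.End()

	span.SetAttributes(
		semconv.AWSDynamoDBTableNames(table),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(limit),
	)
}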
-func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the -// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the -// value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field. -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of the -// `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to -// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents -// the JSON-serialized value of the `ItemCollectionMetrics` response field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to -// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents -// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request -// field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of the -// `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It -// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming -// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. 
It -// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request -// parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of -// the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the number of -// items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the -// `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value of -// the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an -// [ECS cluster]. -// -// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container instance]. -// -// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running -// [ECS task]. 
-// -// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the family name of -// the [ECS task definition] used to create the ECS task. -// -// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" -// semantic conventions. It represents the ID of a running ECS task. The ID MUST -// be extracted from `task.arn`. -func AWSECSTaskID(val string) attribute.KeyValue { - return AWSECSTaskIDKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// the task definition used to create the ECS task. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// AWSExtendedRequestID returns an attribute KeyValue conforming to the -// "aws.extended_request_id" semantic conventions. It represents the AWS extended -// request ID as returned in the response header `x-amz-id-2`. -func AWSExtendedRequestID(val string) attribute.KeyValue { - return AWSExtendedRequestIDKey.String(val) -} - -// AWSKinesisStreamName returns an attribute KeyValue conforming to the -// "aws.kinesis.stream_name" semantic conventions. It represents the name of the -// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name` -// parameter of the Kinesis [describe-stream] operation. -// -// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html -// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html -func AWSKinesisStreamName(val string) attribute.KeyValue { - return AWSKinesisStreamNameKey.String(val) -} - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked -// ARN as provided on the `Context` passed to the function ( -// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next` -// applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the -// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID -// of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda -// function. It's contents are read by Lambda and used to trigger a function. -// This isn't available in the lambda execution context or the lambda runtime -// environtment. This is going to be populated by the AWS SDK for each language -// when that UUID is present. Some of these operations are -// Create/Delete/Get/List/Update EventSourceMapping. 
-// -// [AWS Lambda EvenSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html -func AWSLambdaResourceMappingID(val string) attribute.KeyValue { - return AWSLambdaResourceMappingIDKey.String(val) -} - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of the -// AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" -// semantic conventions. It represents the AWS request ID as returned in the -// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` -// . -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" -// semantic conventions. It represents the S3 bucket name the request refers to. -// Corresponds to the `--bucket` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object (in -// the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" -// semantic conventions. It represents the delete request container that -// specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic -// conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 API] operations. -// -// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. 
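// Editor's sketch (illustrative only, not part of the vendored file): the S3
// helpers declared around here compose the same way; a span created around an
// S3 API call gets the bucket and object key recorded via SetAttributes. The
// tracer and span names and the semconv import version are assumptions.
package awss3sketch

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

// recordGetObject tags a span for a hypothetical GetObject call.
func recordGetObject(ctx context.Context, bucket, key string) {
	_, span := otel.Tracer("example").Start(ctx, "S3.GetObject")
	defer span.End()

	span.SetAttributes(
		semconv.AWSS3Bucket(bucket),
		semconv.AWSS3Key(key),
	)
}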
-func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the -// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of -// the Secret stored in the Secrets Mangger. -func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { - return AWSSecretsmanagerSecretARNKey.String(val) -} - -// AWSSNSTopicARN returns an attribute KeyValue conforming to the -// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS -// Topic. An Amazon SNS [topic] is a logical access point that acts as a -// communication channel. -// -// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html -func AWSSNSTopicARN(val string) attribute.KeyValue { - return AWSSNSTopicARNKey.String(val) -} - -// AWSSQSQueueURL returns an attribute KeyValue conforming to the -// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS -// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service -// (SQS) and is used to access the queue and perform actions on it. -func AWSSQSQueueURL(val string) attribute.KeyValue { - return AWSSQSQueueURLKey.String(val) -} - -// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the -// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN -// of the AWS Step Functions Activity. -func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { - return AWSStepFunctionsActivityARNKey.String(val) -} - -// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to -// the "aws.step_functions.state_machine.arn" semantic conventions. It represents -// the ARN of the AWS Step Functions State Machine. -func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { - return AWSStepFunctionsStateMachineARNKey.String(val) -} - -// Enum values for aws.ecs.launchtype -var ( - // ec2 - // Stability: development - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - // Stability: development - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Namespace: az -const ( - // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic - // conventions. It represents the [Azure Resource Provider Namespace] as - // recognized by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" - // - // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers - AzNamespaceKey = attribute.Key("az.namespace") - - // AzServiceRequestIDKey is the attribute Key conforming to the - // "az.service_request_id" semantic conventions. It represents the unique - // identifier of the service request. It's generated by the Azure service and - // returned with the response. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "00000000-0000-0000-0000-000000000000" - AzServiceRequestIDKey = attribute.Key("az.service_request_id") -) - -// AzNamespace returns an attribute KeyValue conforming to the "az.namespace" -// semantic conventions. It represents the [Azure Resource Provider Namespace] as -// recognized by the client. -// -// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers -func AzNamespace(val string) attribute.KeyValue { - return AzNamespaceKey.String(val) -} - -// AzServiceRequestID returns an attribute KeyValue conforming to the -// "az.service_request_id" semantic conventions. It represents the unique -// identifier of the service request. It's generated by the Azure service and -// returned with the response. -func AzServiceRequestID(val string) attribute.KeyValue { - return AzServiceRequestIDKey.String(val) -} - -// Namespace: azure -const ( - // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" - // semantic conventions. It represents the unique identifier of the client - // instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" - AzureClientIDKey = attribute.Key("azure.client.id") - - // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the - // "azure.cosmosdb.connection.mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") - - // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the - // "azure.cosmosdb.consistency.level" semantic conventions. It represents the - // account or request [consistency level]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", - // "Session" - // - // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels - AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") - - // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to - // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It - // represents the list of regions contacted during operation in the order that - // they were contacted. If there is more than one region listed, it indicates - // that the operation was performed on multiple regions i.e. cross-regional - // call. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "North Central US", "Australia East", "Australia Southeast" - // Note: Region name matches the format of `displayName` in [Azure Location API] - // - // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location - AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") - - // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the - // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents - // the number of request units consumed by the operation. 
- // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 46.18, 1.0 - AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") - - // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the - // "azure.cosmosdb.request.body.size" semantic conventions. It represents the - // request payload size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") - - // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the - // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents - // the cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000, 1002 - AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") -) - -// AzureClientID returns an attribute KeyValue conforming to the -// "azure.client.id" semantic conventions. It represents the unique identifier of -// the client instance. -func AzureClientID(val string) attribute.KeyValue { - return AzureClientIDKey.String(val) -} - -// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue -// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic -// conventions. It represents the list of regions contacted during operation in -// the order that they were contacted. If there is more than one region listed, -// it indicates that the operation was performed on multiple regions i.e. -// cross-regional call. -func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { - return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) -} - -// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming -// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It -// represents the number of request units consumed by the operation. -func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { - return AzureCosmosDBOperationRequestChargeKey.Float64(val) -} - -// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the -// "azure.cosmosdb.request.body.size" semantic conventions. It represents the -// request payload size in bytes. -func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { - return AzureCosmosDBRequestBodySizeKey.Int(val) -} - -// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to -// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It -// represents the cosmos DB sub status code. -func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { - return AzureCosmosDBResponseSubStatusCodeKey.Int(val) -} - -// Enum values for azure.cosmosdb.connection.mode -var ( - // Gateway (HTTP) connection. - // Stability: development - AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") - // Direct connection. 
- // Stability: development - AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") -) - -// Enum values for azure.cosmosdb.consistency.level -var ( - // strong - // Stability: development - AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") - // bounded_staleness - // Stability: development - AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") - // session - // Stability: development - AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") - // eventual - // Stability: development - AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") - // consistent_prefix - // Stability: development - AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") -) - -// Namespace: browser -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.brands`). - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the "browser.language" - // semantic conventions. It represents the preferred language of the user using - // the browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "en", "en-US", "fr", "fr-FR" - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the browser is - // running on a mobile device. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be - // left unset. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" - // semantic conventions. It represents the platform on which the browser is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Windows", "macOS", "Android" - // Note: This value is intended to be taken from the [UA client hints API] ( - // `navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the - // [W3C User-Agent Client Hints specification]. Note that some (but not all) of - // these values can overlap with values in the - // [`os.type` and `os.name` attributes]. 
However, for consistency, the values in - // the `browser.platform` attribute should capture the exact value that the user - // agent provides. - // - // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface - // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform - // [`os.type` and `os.name` attributes]: ./os.md - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" -// semantic conventions. It represents the array of brand name and version -// separated by a space. -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred language -// of the user using the browser. -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" -// semantic conventions. It represents a boolean that is true if the browser is -// running on a mobile device. -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running. -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Namespace: cassandra -const ( - // CassandraConsistencyLevelKey is the attribute Key conforming to the - // "cassandra.consistency.level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from [CQL]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html - CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") - - // CassandraCoordinatorDCKey is the attribute Key conforming to the - // "cassandra.coordinator.dc" semantic conventions. It represents the data - // center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: us-west-2 - CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") - - // CassandraCoordinatorIDKey is the attribute Key conforming to the - // "cassandra.coordinator.id" semantic conventions. It represents the ID of the - // coordinating node for a query. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af - CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") - - // CassandraPageSizeKey is the attribute Key conforming to the - // "cassandra.page.size" semantic conventions. It represents the fetch size used - // for paging, i.e. how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 5000 - CassandraPageSizeKey = attribute.Key("cassandra.page.size") - - // CassandraQueryIdempotentKey is the attribute Key conforming to the - // "cassandra.query.idempotent" semantic conventions. It represents the whether - // or not the query is idempotent. 
- // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") - - // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the - // "cassandra.speculative_execution.count" semantic conventions. It represents - // the number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 2 - CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") -) - -// CassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "cassandra.coordinator.dc" semantic conventions. It represents the data center -// of the coordinating node for a query. -func CassandraCoordinatorDC(val string) attribute.KeyValue { - return CassandraCoordinatorDCKey.String(val) -} - -// CassandraCoordinatorID returns an attribute KeyValue conforming to the -// "cassandra.coordinator.id" semantic conventions. It represents the ID of the -// coordinating node for a query. -func CassandraCoordinatorID(val string) attribute.KeyValue { - return CassandraCoordinatorIDKey.String(val) -} - -// CassandraPageSize returns an attribute KeyValue conforming to the -// "cassandra.page.size" semantic conventions. It represents the fetch size used -// for paging, i.e. how many rows will be returned at once. -func CassandraPageSize(val int) attribute.KeyValue { - return CassandraPageSizeKey.Int(val) -} - -// CassandraQueryIdempotent returns an attribute KeyValue conforming to the -// "cassandra.query.idempotent" semantic conventions. It represents the whether -// or not the query is idempotent. -func CassandraQueryIdempotent(val bool) attribute.KeyValue { - return CassandraQueryIdempotentKey.Bool(val) -} - -// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to -// the "cassandra.speculative_execution.count" semantic conventions. It -// represents the number of times a query was speculatively executed. Not set or -// `0` if the query was not executed speculatively. 
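// Editor's sketch (illustrative only, not part of the vendored file): the
// Cassandra attributes combine value constructors (coordinator data center,
// page size, idempotency) with the predefined consistency-level enum values
// declared just below, which are already attribute.KeyValue and can be passed
// to SetAttributes directly. The tracer name, span name, and the literal
// values here are assumptions, as is the semconv import version.
package cassandrasketch

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version
)

// recordCQLSelect tags a span for a hypothetical CQL SELECT.
func recordCQLSelect(ctx context.Context, dc string, pageSize int) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT my_keyspace.users")
	defer span.End()

	span.SetAttributes(
		semconv.CassandraCoordinatorDC(dc),
		semconv.CassandraPageSize(pageSize),
		semconv.CassandraQueryIdempotent(true),
		semconv.CassandraConsistencyLevelQuorum,
	)
}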
-func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return CassandraSpeculativeExecutionCountKey.Int(val) -} - -// Enum values for cassandra.consistency.level -var ( - // all - // Stability: development - CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") - // each_quorum - // Stability: development - CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") - // quorum - // Stability: development - CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") - // local_quorum - // Stability: development - CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") - // one - // Stability: development - CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") - // two - // Stability: development - CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") - // three - // Stability: development - CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") - // local_one - // Stability: development - CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") - // any - // Stability: development - CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") - // serial - // Stability: development - CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") - // local_serial - // Stability: development - CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") -) - -// Namespace: cicd -const ( - // CICDPipelineActionNameKey is the attribute Key conforming to the - // "cicd.pipeline.action.name" semantic conventions. It represents the kind of - // action a pipeline run is performing. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BUILD", "RUN", "SYNC" - CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") - - // CICDPipelineNameKey is the attribute Key conforming to the - // "cicd.pipeline.name" semantic conventions. It represents the human readable - // name of the pipeline within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Build and Test", "Lint", "Deploy Go Project", - // "deploy_to_environment" - CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") - - // CICDPipelineResultKey is the attribute Key conforming to the - // "cicd.pipeline.result" semantic conventions. It represents the result of a - // pipeline run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") - - // CICDPipelineRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.run.id" semantic conventions. It represents the unique - // identifier of a pipeline run within a CI/CD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "120912" - CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") - - // CICDPipelineRunStateKey is the attribute Key conforming to the - // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline - // run goes through these states during its lifecycle. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pending", "executing", "finalizing" - CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") - - // CICDPipelineRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of - // the pipeline run, providing the complete address in order to locate and - // identify the pipeline run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") - - // CICDPipelineTaskNameKey is the attribute Key conforming to the - // "cicd.pipeline.task.name" semantic conventions. It represents the human - // readable name of a task within a pipeline. Task here most closely aligns with - // a [computing process] in a pipeline. Other terms for tasks include commands, - // steps, and procedures. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" - // - // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) - CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") - - // CICDPipelineTaskRunIDKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique - // identifier of a task run within a pipeline. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "12097" - CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") - - // CICDPipelineTaskRunResultKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.result" semantic conventions. It represents the - // result of a task run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "timeout", "skipped" - CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") - - // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the - // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the - // [URL] of the pipeline task run, providing the complete address in order to - // locate and identify the pipeline task run. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") - - // CICDPipelineTaskTypeKey is the attribute Key conforming to the - // "cicd.pipeline.task.type" semantic conventions. It represents the type of the - // task within a pipeline. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "build", "test", "deploy" - CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") - - // CICDSystemComponentKey is the attribute Key conforming to the - // "cicd.system.component" semantic conventions. It represents the name of a - // component of the CICD system. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "controller", "scheduler", "agent" - CICDSystemComponentKey = attribute.Key("cicd.system.component") - - // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" - // semantic conventions. It represents the unique identifier of a worker within - // a CICD system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "abc123", "10.0.1.2", "controller" - CICDWorkerIDKey = attribute.Key("cicd.worker.id") - - // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" - // semantic conventions. It represents the name of a worker within a CICD - // system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "agent-abc", "controller", "Ubuntu LTS" - CICDWorkerNameKey = attribute.Key("cicd.worker.name") - - // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" - // semantic conventions. It represents the state of a CICD worker / agent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle", "busy", "down" - CICDWorkerStateKey = attribute.Key("cicd.worker.state") - - // CICDWorkerURLFullKey is the attribute Key conforming to the - // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the - // worker, providing the complete address in order to locate and identify the - // worker. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://cicd.example.org/worker/abc123" - // - // [URL]: https://wikipedia.org/wiki/URL - CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") -) - -// CICDPipelineName returns an attribute KeyValue conforming to the -// "cicd.pipeline.name" semantic conventions. It represents the human readable -// name of the pipeline within a CI/CD system. -func CICDPipelineName(val string) attribute.KeyValue { - return CICDPipelineNameKey.String(val) -} - -// CICDPipelineRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.id" semantic conventions. It represents the unique -// identifier of a pipeline run within a CI/CD system. -func CICDPipelineRunID(val string) attribute.KeyValue { - return CICDPipelineRunIDKey.String(val) -} - -// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of -// the pipeline run, providing the complete address in order to locate and -// identify the pipeline run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineRunURLFull(val string) attribute.KeyValue { - return CICDPipelineRunURLFullKey.String(val) -} - -// CICDPipelineTaskName returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.name" semantic conventions. It represents the human -// readable name of a task within a pipeline. Task here most closely aligns with -// a [computing process] in a pipeline. Other terms for tasks include commands, -// steps, and procedures. -// -// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) -func CICDPipelineTaskName(val string) attribute.KeyValue { - return CICDPipelineTaskNameKey.String(val) -} - -// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.id" semantic conventions. 
It represents the unique -// identifier of a task run within a pipeline. -func CICDPipelineTaskRunID(val string) attribute.KeyValue { - return CICDPipelineTaskRunIDKey.String(val) -} - -// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the -// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the -// [URL] of the pipeline task run, providing the complete address in order to -// locate and identify the pipeline task run. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { - return CICDPipelineTaskRunURLFullKey.String(val) -} - -// CICDSystemComponent returns an attribute KeyValue conforming to the -// "cicd.system.component" semantic conventions. It represents the name of a -// component of the CICD system. -func CICDSystemComponent(val string) attribute.KeyValue { - return CICDSystemComponentKey.String(val) -} - -// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" -// semantic conventions. It represents the unique identifier of a worker within a -// CICD system. -func CICDWorkerID(val string) attribute.KeyValue { - return CICDWorkerIDKey.String(val) -} - -// CICDWorkerName returns an attribute KeyValue conforming to the -// "cicd.worker.name" semantic conventions. It represents the name of a worker -// within a CICD system. -func CICDWorkerName(val string) attribute.KeyValue { - return CICDWorkerNameKey.String(val) -} - -// CICDWorkerURLFull returns an attribute KeyValue conforming to the -// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the -// worker, providing the complete address in order to locate and identify the -// worker. -// -// [URL]: https://wikipedia.org/wiki/URL -func CICDWorkerURLFull(val string) attribute.KeyValue { - return CICDWorkerURLFullKey.String(val) -} - -// Enum values for cicd.pipeline.action.name -var ( - // The pipeline run is executing a build. - // Stability: development - CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") - // The pipeline run is executing. - // Stability: development - CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") - // The pipeline run is executing a sync. - // Stability: development - CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") -) - -// Enum values for cicd.pipeline.result -var ( - // The pipeline run finished successfully. - // Stability: development - CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") - // The pipeline run did not finish successfully, eg. due to a compile error or a - // failing test. Such failures are usually detected by non-zero exit codes of - // the tools executed in the pipeline run. - // Stability: development - CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") - // The pipeline run failed due to an error in the CICD system, eg. due to the - // worker being killed. - // Stability: development - CICDPipelineResultError = CICDPipelineResultKey.String("error") - // A timeout caused the pipeline run to be interrupted. - // Stability: development - CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") - // The pipeline run was cancelled, eg. by a user manually cancelling the - // pipeline run. - // Stability: development - CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") - // The pipeline run was skipped, eg. due to a precondition not being met. 
- // Stability: development - CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") -) - -// Enum values for cicd.pipeline.run.state -var ( - // The run pending state spans from the event triggering the pipeline run until - // the execution of the run starts (eg. time spent in a queue, provisioning - // agents, creating run resources). - // - // Stability: development - CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") - // The executing state spans the execution of any run tasks (eg. build, test). - // Stability: development - CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") - // The finalizing state spans from when the run has finished executing (eg. - // cleanup of run resources). - // Stability: development - CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") -) - -// Enum values for cicd.pipeline.task.run.result -var ( - // The task run finished successfully. - // Stability: development - CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") - // The task run did not finish successfully, eg. due to a compile error or a - // failing test. Such failures are usually detected by non-zero exit codes of - // the tools executed in the task run. - // Stability: development - CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") - // The task run failed due to an error in the CICD system, eg. due to the worker - // being killed. - // Stability: development - CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") - // A timeout caused the task run to be interrupted. - // Stability: development - CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") - // The task run was cancelled, eg. by a user manually cancelling the task run. - // Stability: development - CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") - // The task run was skipped, eg. due to a precondition not being met. - // Stability: development - CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") -) - -// Enum values for cicd.pipeline.task.type -var ( - // build - // Stability: development - CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") - // test - // Stability: development - CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") - // deploy - // Stability: development - CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") -) - -// Enum values for cicd.worker.state -var ( - // The worker is not performing work for the CICD system. It is available to the - // CICD system to perform work on (online / idle). - // Stability: development - CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") - // The worker is performing work for the CICD system. - // Stability: development - CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") - // The worker is not available to the CICD system (disconnected / down). - // Stability: development - CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") -) - -// Namespace: client -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.address` SHOULD represent the client address behind any - // intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" semantic - // conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - // Note: When observed from the server side, and when communicating through an - // intermediary, `client.port` SHOULD represent the client port behind any - // intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the "client.address" -// semantic conventions. It represents the client address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// Namespace: cloud -const ( - // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" - // semantic conventions. It represents the cloud account ID the resource is - // assigned to. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "111111111111", "opentelemetry" - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to increase - // availability. Availability zone represents the zone where the resource is - // running. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-east-1c" - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic - // conventions. It represents the geographical region within a cloud provider. - // When associated with a resource, this attribute specifies the region where - // the resource operates. 
When calling services or APIs deployed on a cloud, - // this attribute identifies the region where the called destination is - // deployed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1", "us-east-1" - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions], [AWS regions], [Azure regions], - // [Google Cloud regions], or [Tencent Cloud regions]. - // - // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm - // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ - // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ - // [Google Cloud regions]: https://cloud.google.com/about/locations - // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" - // semantic conventions. It represents the cloud provider-specific native - // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a - // [fully qualified resource ID] on Azure, a [full resource name] on GCP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", - // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", - // "/subscriptions//resourceGroups/ - // /providers/Microsoft.Web/sites//functions/" - // Note: On some cloud providers, it may not be possible to determine the full - // ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud provider. - // The following well-known definitions MUST be used if you set this attribute - // and they apply: - // - // - **AWS Lambda:** The function [ARN]. - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix] - // with the resolved function version, as the same runtime instance may be - // invocable with - // multiple different aliases. - // - **GCP:** The [URI of the resource] - // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, - // *not* the function app, having the form - // - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` - // . - // This means that a span attribute MUST be used, as an Azure function app - // can host multiple functions that would usually share - // a TracerProvider. - // - // - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id - // [full resource name]: https://google.aip.dev/122#full-resource-names - // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html - // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html - // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names - // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id - CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. 
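Editor's note: the client.* and cloud.* helpers in this removed file are thin constructors over attribute.KeyValue, so they can be passed anywhere the OpenTelemetry Go API accepts attributes. A minimal sketch follows; it is illustrative only and not part of this diff. The semconv version in the import path, the tracer and function names, and the literal values (taken from the examples in the comments above) are assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version path
)

func describeDeployment(ctx context.Context) {
	// Cloud identity is usually recorded once, on the SDK resource.
	res, _ := resource.New(ctx, resource.WithAttributes(
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEC2,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAccountID("111111111111"),
	))
	_ = res

	// client.* attributes describe the calling peer and belong on server spans.
	_, span := otel.Tracer("example").Start(ctx, "handle-request")
	defer span.End()
	span.SetAttributes(
		semconv.ClientAddress("10.1.2.80"),
		semconv.ClientPort(65123),
	)
}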
-func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" -// semantic conventions. It represents the geographical region within a cloud -// provider. When associated with a resource, this attribute specifies the region -// where the resource operates. When calling services or APIs deployed on a -// cloud, this attribute identifies the region where the called destination is -// deployed. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] -// on GCP). -// -// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id -// [full resource name]: https://google.aip.dev/122#full-resource-names -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// Enum values for cloud.platform -var ( - // Alibaba Cloud Elastic Compute Service - // Stability: development - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - // Stability: development - CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - // Stability: development - CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - // Stability: development - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - // Stability: development - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - // Stability: development - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - // Stability: development - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - // Stability: development - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - // Stability: development - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - // Stability: development - CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - // Stability: development - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Apps - // Stability: development - CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") - // Azure Container Instances - // Stability: development - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - // Stability: development - 
CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - // Stability: development - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - // Stability: development - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - // Stability: development - CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - // Stability: development - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - // Stability: development - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - // Stability: development - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - // Stability: development - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - // Stability: development - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - // Stability: development - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - // Stability: development - CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - // Stability: development - CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") - // Compute on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") - // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") - // Tencent Cloud Cloud Virtual Machine (CVM) - // Stability: development - CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - // Stability: development - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - // Stability: development - CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") -) - -// Enum values for cloud.provider -var ( - // Alibaba Cloud - // Stability: development - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - // Stability: development - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - // Stability: development - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - // Stability: development - CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") - // Oracle Cloud Infrastructure (OCI) - // Stability: development - CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") - // Tencent Cloud - // Stability: development - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// Namespace: cloudevents -const ( - // CloudEventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. 
It represents the [event_id] - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" - // - // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id - CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudEventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the [source] - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", - // "my-service" - // - // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 - CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudEventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents specification] which the event uses. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1.0 - // - // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion - CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudEventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the [subject] - // of the event in the context of the event producer (identified by source). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: mynewfile.jpg - // - // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject - CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudEventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the [event_type] - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" - // - // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type - CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudEventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the [event_id] -// uniquely identifies the event. -// -// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id -func CloudEventsEventID(val string) attribute.KeyValue { - return CloudEventsEventIDKey.String(val) -} - -// CloudEventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the [source] -// identifies the context in which an event happened. -// -// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 -func CloudEventsEventSource(val string) attribute.KeyValue { - return CloudEventsEventSourceKey.String(val) -} - -// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the -// "cloudevents.event_spec_version" semantic conventions. 
It represents the -// [version of the CloudEvents specification] which the event uses. -// -// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion -func CloudEventsEventSpecVersion(val string) attribute.KeyValue { - return CloudEventsEventSpecVersionKey.String(val) -} - -// CloudEventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the [subject] -// of the event in the context of the event producer (identified by source). -// -// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject -func CloudEventsEventSubject(val string) attribute.KeyValue { - return CloudEventsEventSubjectKey.String(val) -} - -// CloudEventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the [event_type] -// contains a value describing the type of event related to the originating -// occurrence. -// -// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type -func CloudEventsEventType(val string) attribute.KeyValue { - return CloudEventsEventTypeKey.String(val) -} - -// Namespace: cloudfoundry -const ( - // CloudFoundryAppIDKey is the attribute Key conforming to the - // "cloudfoundry.app.id" semantic conventions. It represents the guid of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_id`. This is the same value as - // reported by `cf app --guid`. - CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") - - // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.app.instance.id" semantic conventions. It represents the index - // of the application instance. 0 when just one instance is active. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0", "1" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the application instance index for applications - // deployed on the runtime. - // - // Application instrumentation should use the value from environment - // variable `CF_INSTANCE_INDEX`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") - - // CloudFoundryAppNameKey is the attribute Key conforming to the - // "cloudfoundry.app.name" semantic conventions. It represents the name of the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-app-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.application_name`. This is the same value - // as reported by `cf apps`. - CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") - - // CloudFoundryOrgIDKey is the attribute Key conforming to the - // "cloudfoundry.org.id" semantic conventions. It represents the guid of the - // CloudFoundry org the application is running in. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_id`. This is the same value as - // reported by `cf org --guid`. - CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") - - // CloudFoundryOrgNameKey is the attribute Key conforming to the - // "cloudfoundry.org.name" semantic conventions. It represents the name of the - // CloudFoundry organization the app is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.org_name`. This is the same value as - // reported by `cf orgs`. - CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") - - // CloudFoundryProcessIDKey is the attribute Key conforming to the - // "cloudfoundry.process.id" semantic conventions. It represents the UID - // identifying the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to - // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. - // For system components, this could be the actual PID. - CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") - - // CloudFoundryProcessTypeKey is the attribute Key conforming to the - // "cloudfoundry.process.type" semantic conventions. It represents the type of - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "web" - // Note: CloudFoundry applications can consist of multiple jobs. Usually the - // main process will be of type `web`. There can be additional background - // tasks or side-cars with different process types. - CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") - - // CloudFoundrySpaceIDKey is the attribute Key conforming to the - // "cloudfoundry.space.id" semantic conventions. It represents the guid of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_id`. This is the same value as - // reported by `cf space --guid`. - CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") - - // CloudFoundrySpaceNameKey is the attribute Key conforming to the - // "cloudfoundry.space.name" semantic conventions. It represents the name of the - // CloudFoundry space the application is running in. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-space-name" - // Note: Application instrumentation should use the value from environment - // variable `VCAP_APPLICATION.space_name`. This is the same value as - // reported by `cf spaces`. - CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") - - // CloudFoundrySystemIDKey is the attribute Key conforming to the - // "cloudfoundry.system.id" semantic conventions. 
It represents a guid or - // another name describing the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cf/gorouter" - // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the component name, e.g. "gorouter", for - // CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.id` should be set to - // `spec.deployment/spec.name`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") - - // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the - // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid - // describing the concrete instance of the event source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] - // . - // It is used for logs and metrics emitted by CloudFoundry. It is - // supposed to contain the vm id for CloudFoundry components. - // - // When system components are instrumented, values from the - // [Bosh spec] - // should be used. The `system.instance.id` should be set to `spec.id`. - // - // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope - // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec - CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") -) - -// CloudFoundryAppID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.id" semantic conventions. It represents the guid of the -// application. -func CloudFoundryAppID(val string) attribute.KeyValue { - return CloudFoundryAppIDKey.String(val) -} - -// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.app.instance.id" semantic conventions. It represents the index -// of the application instance. 0 when just one instance is active. -func CloudFoundryAppInstanceID(val string) attribute.KeyValue { - return CloudFoundryAppInstanceIDKey.String(val) -} - -// CloudFoundryAppName returns an attribute KeyValue conforming to the -// "cloudfoundry.app.name" semantic conventions. It represents the name of the -// application. -func CloudFoundryAppName(val string) attribute.KeyValue { - return CloudFoundryAppNameKey.String(val) -} - -// CloudFoundryOrgID returns an attribute KeyValue conforming to the -// "cloudfoundry.org.id" semantic conventions. It represents the guid of the -// CloudFoundry org the application is running in. -func CloudFoundryOrgID(val string) attribute.KeyValue { - return CloudFoundryOrgIDKey.String(val) -} - -// CloudFoundryOrgName returns an attribute KeyValue conforming to the -// "cloudfoundry.org.name" semantic conventions. It represents the name of the -// CloudFoundry organization the app is running in. -func CloudFoundryOrgName(val string) attribute.KeyValue { - return CloudFoundryOrgNameKey.String(val) -} - -// CloudFoundryProcessID returns an attribute KeyValue conforming to the -// "cloudfoundry.process.id" semantic conventions. It represents the UID -// identifying the process. 
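Editor's note: as the notes above state, the cloudfoundry.* values come from VCAP_APPLICATION and CF_INSTANCE_INDEX. Below is a hedged sketch of an instrumentation reading them; the JSON field names follow those notes, while the function name, semconv version path, and error handling shortcuts are illustrative assumptions.

package example

import (
	"encoding/json"
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version path
)

// cloudFoundryAttrs derives cloudfoundry.* resource attributes from the
// environment, as the attribute notes suggest. Errors are ignored for brevity.
func cloudFoundryAttrs() []attribute.KeyValue {
	var vcap struct {
		ApplicationID   string `json:"application_id"`
		ApplicationName string `json:"application_name"`
		SpaceName       string `json:"space_name"`
	}
	_ = json.Unmarshal([]byte(os.Getenv("VCAP_APPLICATION")), &vcap)

	return []attribute.KeyValue{
		semconv.CloudFoundryAppID(vcap.ApplicationID),
		semconv.CloudFoundryAppName(vcap.ApplicationName),
		semconv.CloudFoundrySpaceName(vcap.SpaceName),
		semconv.CloudFoundryAppInstanceID(os.Getenv("CF_INSTANCE_INDEX")),
	}
}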
-func CloudFoundryProcessID(val string) attribute.KeyValue { - return CloudFoundryProcessIDKey.String(val) -} - -// CloudFoundryProcessType returns an attribute KeyValue conforming to the -// "cloudfoundry.process.type" semantic conventions. It represents the type of -// process. -func CloudFoundryProcessType(val string) attribute.KeyValue { - return CloudFoundryProcessTypeKey.String(val) -} - -// CloudFoundrySpaceID returns an attribute KeyValue conforming to the -// "cloudfoundry.space.id" semantic conventions. It represents the guid of the -// CloudFoundry space the application is running in. -func CloudFoundrySpaceID(val string) attribute.KeyValue { - return CloudFoundrySpaceIDKey.String(val) -} - -// CloudFoundrySpaceName returns an attribute KeyValue conforming to the -// "cloudfoundry.space.name" semantic conventions. It represents the name of the -// CloudFoundry space the application is running in. -func CloudFoundrySpaceName(val string) attribute.KeyValue { - return CloudFoundrySpaceNameKey.String(val) -} - -// CloudFoundrySystemID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.id" semantic conventions. It represents a guid or another -// name describing the event source. -func CloudFoundrySystemID(val string) attribute.KeyValue { - return CloudFoundrySystemIDKey.String(val) -} - -// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the -// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid -// describing the concrete instance of the event source. -func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { - return CloudFoundrySystemInstanceIDKey.String(val) -} - -// Namespace: code -const ( - // CodeColumnNumberKey is the attribute Key conforming to the - // "code.column.number" semantic conventions. It represents the column number in - // `code.file.path` best representing the operation. It SHOULD point within the - // code unit named in `code.function.name`. This attribute MUST NOT be used on - // the Profile signal since the data is already captured in 'message Line'. This - // constraint is imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeColumnNumberKey = attribute.Key("code.column.number") - - // CodeFilePathKey is the attribute Key conforming to the "code.file.path" - // semantic conventions. It represents the source code file name that identifies - // the code unit as uniquely as possible (preferably an absolute file path). - // This attribute MUST NOT be used on the Profile signal since the data is - // already captured in 'message Function'. This constraint is imposed to prevent - // redundancy and maintain data integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: /usr/local/MyApplication/content_root/app/index.php - CodeFilePathKey = attribute.Key("code.file.path") - - // CodeFunctionNameKey is the attribute Key conforming to the - // "code.function.name" semantic conventions. It represents the method or - // function fully-qualified name without arguments. The value should fit the - // natural representation of the language runtime, which is also likely the same - // used within `code.stacktrace` attribute value. This attribute MUST NOT be - // used on the Profile signal since the data is already captured in 'message - // Function'. This constraint is imposed to prevent redundancy and maintain data - // integrity. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "com.example.MyHttpService.serveRequest", - // "GuzzleHttp\Client::transfer", "fopen" - // Note: Values and format depends on each language runtime, thus it is - // impossible to provide an exhaustive list of examples. - // The values are usually the same (or prefixes of) the ones found in native - // stack trace representation stored in - // `code.stacktrace` without information on arguments. - // - // Examples: - // - // - Java method: `com.example.MyHttpService.serveRequest` - // - Java anonymous class method: `com.mycompany.Main$1.myMethod` - // - Java lambda method: - // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` - // - PHP function: `GuzzleHttp\Client::transfer` - // - Go function: `github.com/my/repo/pkg.foo.func5` - // - Elixir: `OpenTelemetry.Ctx.new` - // - Erlang: `opentelemetry_ctx:new` - // - Rust: `playground::my_module::my_cool_func` - // - C function: `fopen` - CodeFunctionNameKey = attribute.Key("code.function.name") - - // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" - // semantic conventions. It represents the line number in `code.file.path` best - // representing the operation. It SHOULD point within the code unit named in - // `code.function.name`. This attribute MUST NOT be used on the Profile signal - // since the data is already captured in 'message Line'. This constraint is - // imposed to prevent redundancy and maintain data integrity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - CodeLineNumberKey = attribute.Key("code.line.number") - - // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" - // semantic conventions. It represents a stacktrace as a string in the natural - // representation for the language runtime. The representation is identical to - // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile - // signal since the data is already captured in 'message Location'. This - // constraint is imposed to prevent redundancy and maintain data integrity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - // - // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumnNumber returns an attribute KeyValue conforming to the -// "code.column.number" semantic conventions. It represents the column number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -func CodeColumnNumber(val int) attribute.KeyValue { - return CodeColumnNumberKey.Int(val) -} - -// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" -// semantic conventions. It represents the source code file name that identifies -// the code unit as uniquely as possible (preferably an absolute file path). This -// attribute MUST NOT be used on the Profile signal since the data is already -// captured in 'message Function'. 
This constraint is imposed to prevent -// redundancy and maintain data integrity. -func CodeFilePath(val string) attribute.KeyValue { - return CodeFilePathKey.String(val) -} - -// CodeFunctionName returns an attribute KeyValue conforming to the -// "code.function.name" semantic conventions. It represents the method or -// function fully-qualified name without arguments. The value should fit the -// natural representation of the language runtime, which is also likely the same -// used within `code.stacktrace` attribute value. This attribute MUST NOT be used -// on the Profile signal since the data is already captured in 'message -// Function'. This constraint is imposed to prevent redundancy and maintain data -// integrity. -func CodeFunctionName(val string) attribute.KeyValue { - return CodeFunctionNameKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the -// "code.line.number" semantic conventions. It represents the line number in -// `code.file.path` best representing the operation. It SHOULD point within the -// code unit named in `code.function.name`. This attribute MUST NOT be used on -// the Profile signal since the data is already captured in 'message Line'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a string -// in the natural representation for the language runtime. The representation is -// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the -// Profile signal since the data is already captured in 'message Location'. This -// constraint is imposed to prevent redundancy and maintain data integrity. -// -// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// Namespace: container -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used to - // run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol" - // Note: If using embedded credentials or sensitive data, it is recommended to - // remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol", "--config", "config.yaml" - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full command - // run by the container as a single string representing the full command. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcontribcol --config config.yaml" - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerCSIPluginNameKey is the attribute Key conforming to the - // "container.csi.plugin.name" semantic conventions. It represents the name of - // the CSI ([Container Storage Interface]) plugin used by the volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pd.csi.storage.gke.io" - // Note: This can sometimes be referred to as a "driver" in CSI implementations. - // This should represent the `name` field of the GetPluginInfo RPC. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") - - // ContainerCSIVolumeIDKey is the attribute Key conforming to the - // "container.csi.volume.id" semantic conventions. It represents the unique - // volume ID returned by the CSI ([Container Storage Interface]) plugin. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" - // Note: This can sometimes be referred to as a "volume handle" in CSI - // implementations. This should represent the `Volume.volume_id` field in CSI - // spec. - // - // [Container Storage Interface]: https://github.com/container-storage-interface/spec - ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") - - // ContainerIDKey is the attribute Key conforming to the "container.id" semantic - // conventions. It represents the container ID. Usually a UUID, as for example - // used to [identify Docker containers]. The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a3bf90e006b2" - // - // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime specific - // image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect [API] - // endpoint. - // K8s defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` - // . - // The ID is assigned by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - // - // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of the - // image the container was built on. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "gcr.io/opentelemetry/operator" - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the repo - // digests of the container image as provided by the container runtime. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", - // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - // Note: [Docker] and [CRI] report those under the `RepoDigests` field. - // - // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image Inspect]. Should be only - // the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "v1.27.1", "3.5.7-0" - // - // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-autoconf" - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container runtime - // managing this container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "docker", "containerd", "rkt" - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full command -// run by the container as a single string representing the full command. 
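Editor's note: container identity attributes like the ones above normally land on the SDK resource rather than on individual spans. A small sketch using the example values from the comments; it is illustrative only, and the semconv version path and function name are assumptions.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version path
)

// containerResource builds a resource describing the running container.
func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
		semconv.ContainerRuntime("containerd"),
	)
}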
-func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerCSIPluginName returns an attribute KeyValue conforming to the -// "container.csi.plugin.name" semantic conventions. It represents the name of -// the CSI ([Container Storage Interface]) plugin used by the volume. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIPluginName(val string) attribute.KeyValue { - return ContainerCSIPluginNameKey.String(val) -} - -// ContainerCSIVolumeID returns an attribute KeyValue conforming to the -// "container.csi.volume.id" semantic conventions. It represents the unique -// volume ID returned by the CSI ([Container Storage Interface]) plugin. -// -// [Container Storage Interface]: https://github.com/container-storage-interface/spec -func ContainerCSIVolumeID(val string) attribute.KeyValue { - return ContainerCSIVolumeIDKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the "container.id" -// semantic conventions. It represents the container ID. Usually a UUID, as for -// example used to [identify Docker containers]. The UUID might be abbreviated. -// -// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime specific -// image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container image -// tags. An example can be found in [Docker Image Inspect]. Should be only the -// `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -// -// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerLabel returns an attribute KeyValue conforming to the -// "container.label" semantic conventions. It represents the container labels, -// `` being the label name, the value being the label value. -func ContainerLabel(key string, val string) attribute.KeyValue { - return attribute.String("container.label."+key, val) -} - -// ContainerName returns an attribute KeyValue conforming to the "container.name" -// semantic conventions. It represents the container name used by container -// runtime. 
-func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container runtime -// managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Namespace: cpu -const ( - // CPULogicalNumberKey is the attribute Key conforming to the - // "cpu.logical_number" semantic conventions. It represents the logical CPU - // number [0..n-1]. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - CPULogicalNumberKey = attribute.Key("cpu.logical_number") - - // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic - // conventions. It represents the mode of the CPU. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "user", "system" - CPUModeKey = attribute.Key("cpu.mode") -) - -// CPULogicalNumber returns an attribute KeyValue conforming to the -// "cpu.logical_number" semantic conventions. It represents the logical CPU -// number [0..n-1]. -func CPULogicalNumber(val int) attribute.KeyValue { - return CPULogicalNumberKey.Int(val) -} - -// Enum values for cpu.mode -var ( - // user - // Stability: development - CPUModeUser = CPUModeKey.String("user") - // system - // Stability: development - CPUModeSystem = CPUModeKey.String("system") - // nice - // Stability: development - CPUModeNice = CPUModeKey.String("nice") - // idle - // Stability: development - CPUModeIdle = CPUModeKey.String("idle") - // iowait - // Stability: development - CPUModeIOWait = CPUModeKey.String("iowait") - // interrupt - // Stability: development - CPUModeInterrupt = CPUModeKey.String("interrupt") - // steal - // Stability: development - CPUModeSteal = CPUModeKey.String("steal") - // kernel - // Stability: development - CPUModeKernel = CPUModeKey.String("kernel") -) - -// Namespace: db -const ( - // DBClientConnectionPoolNameKey is the attribute Key conforming to the - // "db.client.connection.pool.name" semantic conventions. It represents the name - // of the connection pool; unique within the instrumented application. In case - // the connection pool implementation doesn't provide a name, instrumentation - // SHOULD use a combination of parameters that would make the name unique, for - // example, combining attributes `server.address`, `server.port`, and - // `db.namespace`, formatted as `server.address:server.port/db.namespace`. - // Instrumentations that generate connection pool name following different - // patterns SHOULD document it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myDataSource" - DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") - - // DBClientConnectionStateKey is the attribute Key conforming to the - // "db.client.connection.state" semantic conventions. It represents the state of - // a connection in the pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "idle" - DBClientConnectionStateKey = attribute.Key("db.client.connection.state") - - // DBCollectionNameKey is the attribute Key conforming to the - // "db.collection.name" semantic conventions. It represents the name of a - // collection (table, container) within the database. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "public.users", "customers" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The collection name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple collections - // in non-batch operations. - // - // For batch operations, if the individual operations are known to have the same - // collection name then that collection name SHOULD be used. - DBCollectionNameKey = attribute.Key("db.collection.name") - - // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic - // conventions. It represents the name of the database, fully qualified within - // the server address and port. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "customers", "test.users" - // Note: If a database system has multiple namespace components, they SHOULD be - // concatenated from the most general to the most specific namespace component, - // using `|` as a separator between the components. Any missing components (and - // their associated separators) SHOULD be omitted. - // Semantic conventions for individual database systems SHOULD document what - // `db.namespace` means in the context of that system. - // It is RECOMMENDED to capture the value as provided by the application without - // attempting to do any case normalization. - DBNamespaceKey = attribute.Key("db.namespace") - - // DBOperationBatchSizeKey is the attribute Key conforming to the - // "db.operation.batch.size" semantic conventions. It represents the number of - // queries included in a batch operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 2, 3, 4 - // Note: Operations are only considered batches when they contain two or more - // operations, and so `db.operation.batch.size` SHOULD never be `1`. - DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") - - // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" - // semantic conventions. It represents the name of the operation or command - // being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "findAndModify", "HMSET", "SELECT" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // The operation name SHOULD NOT be extracted from `db.query.text`, - // when the database system supports query text with multiple operations - // in non-batch operations. - // - // If spaces can occur in the operation name, multiple consecutive spaces - // SHOULD be normalized to a single space. - // - // For batch operations, if the individual operations are known to have the same - // operation name - // then that operation name SHOULD be used prepended by `BATCH `, - // otherwise `db.operation.name` SHOULD be `BATCH` or some other database - // system specific term if more applicable. - DBOperationNameKey = attribute.Key("db.operation.name") - - // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" - // semantic conventions. It represents the low cardinality summary of a database - // query. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get - // user by id" - // Note: The query summary describes a class of database queries and is useful - // as a grouping key, especially when analyzing telemetry for database - // calls involving complex queries. - // - // Summary may be available to the instrumentation through - // instrumentation hooks or other means. If it is not available, - // instrumentations - // that support query parsing SHOULD generate a summary following - // [Generating query summary] - // section. - // - // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query - DBQuerySummaryKey = attribute.Key("db.query.summary") - - // DBQueryTextKey is the attribute Key conforming to the "db.query.text" - // semantic conventions. It represents the database query being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" - // Note: For sanitization see [Sanitization of `db.query.text`]. - // For batch operations, if the individual operations are known to have the same - // query text then that query text SHOULD be used, otherwise all of the - // individual query texts SHOULD be concatenated with separator `; ` or some - // other database system specific separator if more applicable. - // Parameterized query text SHOULD NOT be sanitized. Even though parameterized - // query text can potentially have sensitive data, by using a parameterized - // query the user is giving a strong signal that any sensitive data will be - // passed as parameter values, and the benefit to observability of capturing the - // static part of the query text by default outweighs the risk. - // - // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext - DBQueryTextKey = attribute.Key("db.query.text") - - // DBResponseReturnedRowsKey is the attribute Key conforming to the - // "db.response.returned_rows" semantic conventions. It represents the number of - // rows returned by the operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 10, 30, 1000 - DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") - - // DBResponseStatusCodeKey is the attribute Key conforming to the - // "db.response.status_code" semantic conventions. It represents the database - // response status code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "102", "ORA-17002", "08P01", "404" - // Note: The status code returned by the database. Usually it represents an - // error code, but may also represent partial success, warning, or differentiate - // between various types of successful outcomes. - // Semantic conventions for individual database systems SHOULD document what - // `db.response.status_code` means in the context of that system. - DBResponseStatusCodeKey = attribute.Key("db.response.status_code") - - // DBStoredProcedureNameKey is the attribute Key conforming to the - // "db.stored_procedure.name" semantic conventions. It represents the name of a - // stored procedure within the database. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GetCustomer" - // Note: It is RECOMMENDED to capture the value as provided by the application - // without attempting to do any case normalization. - // - // For batch operations, if the individual operations are known to have the same - // stored procedure name then that stored procedure name SHOULD be used. - DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") - - // DBSystemNameKey is the attribute Key conforming to the "db.system.name" - // semantic conventions. It represents the database management system (DBMS) - // product as identified by the client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - // Note: The actual DBMS may differ from the one identified by the client. For - // example, when using PostgreSQL client libraries to connect to a CockroachDB, - // the `db.system.name` is set to `postgresql` based on the instrumentation's - // best knowledge. - DBSystemNameKey = attribute.Key("db.system.name") -) - -// DBClientConnectionPoolName returns an attribute KeyValue conforming to the -// "db.client.connection.pool.name" semantic conventions. It represents the name -// of the connection pool; unique within the instrumented application. In case -// the connection pool implementation doesn't provide a name, instrumentation -// SHOULD use a combination of parameters that would make the name unique, for -// example, combining attributes `server.address`, `server.port`, and -// `db.namespace`, formatted as `server.address:server.port/db.namespace`. -// Instrumentations that generate connection pool name following different -// patterns SHOULD document it. -func DBClientConnectionPoolName(val string) attribute.KeyValue { - return DBClientConnectionPoolNameKey.String(val) -} - -// DBCollectionName returns an attribute KeyValue conforming to the -// "db.collection.name" semantic conventions. It represents the name of a -// collection (table, container) within the database. -func DBCollectionName(val string) attribute.KeyValue { - return DBCollectionNameKey.String(val) -} - -// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" -// semantic conventions. It represents the name of the database, fully qualified -// within the server address and port. -func DBNamespace(val string) attribute.KeyValue { - return DBNamespaceKey.String(val) -} - -// DBOperationBatchSize returns an attribute KeyValue conforming to the -// "db.operation.batch.size" semantic conventions. It represents the number of -// queries included in a batch operation. -func DBOperationBatchSize(val int) attribute.KeyValue { - return DBOperationBatchSizeKey.Int(val) -} - -// DBOperationName returns an attribute KeyValue conforming to the -// "db.operation.name" semantic conventions. It represents the name of the -// operation or command being executed. -func DBOperationName(val string) attribute.KeyValue { - return DBOperationNameKey.String(val) -} - -// DBOperationParameter returns an attribute KeyValue conforming to the -// "db.operation.parameter" semantic conventions. It represents a database -// operation parameter, with `` being the parameter name, and the attribute -// value being a string representation of the parameter value. 
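Editor's note: taken together, the db.* attributes above describe a single client call. Below is a hedged sketch of a database client span; the attribute values are taken from the examples in the comments, while the span name, tracer name, bound parameter value, chosen db.system.name enum, and semconv version path are illustrative assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // assumed version path
)

func queryUsers(ctx context.Context) {
	// By convention the span name is the low-cardinality query summary.
	_, span := otel.Tracer("example").Start(ctx, "SELECT wuser_table")
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemNameClickHouse, // any db.system.name enum value
		semconv.DBNamespace("customers"),
		semconv.DBOperationName("SELECT"),
		semconv.DBCollectionName("wuser_table"),
		semconv.DBQuerySummary("SELECT wuser_table"),
		semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
		semconv.DBQueryParameter("0", "jdoe"), // hypothetical bound parameter
	)
}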
-func DBOperationParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.operation.parameter."+key, val) -} - -// DBQueryParameter returns an attribute KeyValue conforming to the -// "db.query.parameter" semantic conventions. It represents a database query -// parameter, with `` being the parameter name, and the attribute value -// being a string representation of the parameter value. -func DBQueryParameter(key string, val string) attribute.KeyValue { - return attribute.String("db.query.parameter."+key, val) -} - -// DBQuerySummary returns an attribute KeyValue conforming to the -// "db.query.summary" semantic conventions. It represents the low cardinality -// summary of a database query. -func DBQuerySummary(val string) attribute.KeyValue { - return DBQuerySummaryKey.String(val) -} - -// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" -// semantic conventions. It represents the database query being executed. -func DBQueryText(val string) attribute.KeyValue { - return DBQueryTextKey.String(val) -} - -// DBResponseReturnedRows returns an attribute KeyValue conforming to the -// "db.response.returned_rows" semantic conventions. It represents the number of -// rows returned by the operation. -func DBResponseReturnedRows(val int) attribute.KeyValue { - return DBResponseReturnedRowsKey.Int(val) -} - -// DBResponseStatusCode returns an attribute KeyValue conforming to the -// "db.response.status_code" semantic conventions. It represents the database -// response status code. -func DBResponseStatusCode(val string) attribute.KeyValue { - return DBResponseStatusCodeKey.String(val) -} - -// DBStoredProcedureName returns an attribute KeyValue conforming to the -// "db.stored_procedure.name" semantic conventions. It represents the name of a -// stored procedure within the database. -func DBStoredProcedureName(val string) attribute.KeyValue { - return DBStoredProcedureNameKey.String(val) -} - -// Enum values for db.client.connection.state -var ( - // idle - // Stability: development - DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") - // used - // Stability: development - DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") -) - -// Enum values for db.system.name -var ( - // Some other SQL database. Fallback only. 
- // Stability: development - DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") - // [Adabas (Adaptable Database System)] - // Stability: development - // - // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas - DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") - // [Actian Ingres] - // Stability: development - // - // [Actian Ingres]: https://www.actian.com/databases/ingres/ - DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") - // [Amazon DynamoDB] - // Stability: development - // - // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ - DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") - // [Amazon Redshift] - // Stability: development - // - // [Amazon Redshift]: https://aws.amazon.com/redshift/ - DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") - // [Azure Cosmos DB] - // Stability: development - // - // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db - DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") - // [InterSystems Caché] - // Stability: development - // - // [InterSystems Caché]: https://www.intersystems.com/products/cache/ - DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") - // [Apache Cassandra] - // Stability: development - // - // [Apache Cassandra]: https://cassandra.apache.org/ - DBSystemNameCassandra = DBSystemNameKey.String("cassandra") - // [ClickHouse] - // Stability: development - // - // [ClickHouse]: https://clickhouse.com/ - DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") - // [CockroachDB] - // Stability: development - // - // [CockroachDB]: https://www.cockroachlabs.com/ - DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") - // [Couchbase] - // Stability: development - // - // [Couchbase]: https://www.couchbase.com/ - DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") - // [Apache CouchDB] - // Stability: development - // - // [Apache CouchDB]: https://couchdb.apache.org/ - DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") - // [Apache Derby] - // Stability: development - // - // [Apache Derby]: https://db.apache.org/derby/ - DBSystemNameDerby = DBSystemNameKey.String("derby") - // [Elasticsearch] - // Stability: development - // - // [Elasticsearch]: https://www.elastic.co/elasticsearch - DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") - // [Firebird] - // Stability: development - // - // [Firebird]: https://www.firebirdsql.org/ - DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") - // [Google Cloud Spanner] - // Stability: development - // - // [Google Cloud Spanner]: https://cloud.google.com/spanner - DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") - // [Apache Geode] - // Stability: development - // - // [Apache Geode]: https://geode.apache.org/ - DBSystemNameGeode = DBSystemNameKey.String("geode") - // [H2 Database] - // Stability: development - // - // [H2 Database]: https://h2database.com/ - DBSystemNameH2database = DBSystemNameKey.String("h2database") - // [Apache HBase] - // Stability: development - // - // [Apache HBase]: https://hbase.apache.org/ - DBSystemNameHBase = DBSystemNameKey.String("hbase") - // [Apache Hive] - // Stability: development - // - // [Apache Hive]: https://hive.apache.org/ - DBSystemNameHive = DBSystemNameKey.String("hive") - // [HyperSQL Database] - // Stability: development - // - // [HyperSQL Database]: https://hsqldb.org/ - DBSystemNameHSQLDB = 
DBSystemNameKey.String("hsqldb") - // [IBM Db2] - // Stability: development - // - // [IBM Db2]: https://www.ibm.com/db2 - DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") - // [IBM Informix] - // Stability: development - // - // [IBM Informix]: https://www.ibm.com/products/informix - DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") - // [IBM Netezza] - // Stability: development - // - // [IBM Netezza]: https://www.ibm.com/products/netezza - DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") - // [InfluxDB] - // Stability: development - // - // [InfluxDB]: https://www.influxdata.com/ - DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") - // [Instant] - // Stability: development - // - // [Instant]: https://www.instantdb.com/ - DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") - // [MariaDB] - // Stability: stable - // - // [MariaDB]: https://mariadb.org/ - DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") - // [Memcached] - // Stability: development - // - // [Memcached]: https://memcached.org/ - DBSystemNameMemcached = DBSystemNameKey.String("memcached") - // [MongoDB] - // Stability: development - // - // [MongoDB]: https://www.mongodb.com/ - DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") - // [Microsoft SQL Server] - // Stability: stable - // - // [Microsoft SQL Server]: https://www.microsoft.com/sql-server - DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") - // [MySQL] - // Stability: stable - // - // [MySQL]: https://www.mysql.com/ - DBSystemNameMySQL = DBSystemNameKey.String("mysql") - // [Neo4j] - // Stability: development - // - // [Neo4j]: https://neo4j.com/ - DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") - // [OpenSearch] - // Stability: development - // - // [OpenSearch]: https://opensearch.org/ - DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") - // [Oracle Database] - // Stability: development - // - // [Oracle Database]: https://www.oracle.com/database/ - DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") - // [PostgreSQL] - // Stability: stable - // - // [PostgreSQL]: https://www.postgresql.org/ - DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") - // [Redis] - // Stability: development - // - // [Redis]: https://redis.io/ - DBSystemNameRedis = DBSystemNameKey.String("redis") - // [SAP HANA] - // Stability: development - // - // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html - DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") - // [SAP MaxDB] - // Stability: development - // - // [SAP MaxDB]: https://maxdb.sap.com/ - DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") - // [SQLite] - // Stability: development - // - // [SQLite]: https://www.sqlite.org/ - DBSystemNameSQLite = DBSystemNameKey.String("sqlite") - // [Teradata] - // Stability: development - // - // [Teradata]: https://www.teradata.com/ - DBSystemNameTeradata = DBSystemNameKey.String("teradata") - // [Trino] - // Stability: development - // - // [Trino]: https://trino.io/ - DBSystemNameTrino = DBSystemNameKey.String("trino") -) - -// Namespace: deployment -const ( - // DeploymentEnvironmentNameKey is the attribute Key conforming to the - // "deployment.environment.name" semantic conventions. It represents the name of - // the [deployment environment] (aka deployment tier). 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "staging", "production" - // Note: `deployment.environment.name` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` resource - // attributes. - // This implies that resources carrying the following attribute combinations - // MUST be - // considered to be identifying the same service: - // - // - `service.name=frontend`, `deployment.environment.name=production` - // - `service.name=frontend`, `deployment.environment.name=staging`. - // - // - // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment - DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") - - // DeploymentIDKey is the attribute Key conforming to the "deployment.id" - // semantic conventions. It represents the id of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1208" - DeploymentIDKey = attribute.Key("deployment.id") - - // DeploymentNameKey is the attribute Key conforming to the "deployment.name" - // semantic conventions. It represents the name of the deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "deploy my app", "deploy-frontend" - DeploymentNameKey = attribute.Key("deployment.name") - - // DeploymentStatusKey is the attribute Key conforming to the - // "deployment.status" semantic conventions. It represents the status of the - // deployment. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - DeploymentStatusKey = attribute.Key("deployment.status") -) - -// DeploymentEnvironmentName returns an attribute KeyValue conforming to the -// "deployment.environment.name" semantic conventions. It represents the name of -// the [deployment environment] (aka deployment tier). -// -// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment -func DeploymentEnvironmentName(val string) attribute.KeyValue { - return DeploymentEnvironmentNameKey.String(val) -} - -// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" -// semantic conventions. It represents the id of the deployment. -func DeploymentID(val string) attribute.KeyValue { - return DeploymentIDKey.String(val) -} - -// DeploymentName returns an attribute KeyValue conforming to the -// "deployment.name" semantic conventions. It represents the name of the -// deployment. -func DeploymentName(val string) attribute.KeyValue { - return DeploymentNameKey.String(val) -} - -// Enum values for deployment.status -var ( - // failed - // Stability: development - DeploymentStatusFailed = DeploymentStatusKey.String("failed") - // succeeded - // Stability: development - DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") -) - -// Namespace: destination -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the destination - // address - domain name if available without reverse DNS lookup; otherwise, IP - // address or Unix domain socket name. 
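A short, hedged sketch of how the deployment.* helpers above could feed an SDK resource. The semconv import path and the literal values are placeholders; only the helper names come from this file.

package telemetry

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the vendored semconv version
)

// newResource builds a resource tagged with the deployment attributes defined
// above. Per the deployment.environment.name note, the environment does not
// change service identity: frontend/production and frontend/staging still
// identify the same service.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.DeploymentEnvironmentName("staging"),
		semconv.DeploymentID("1208"),
		semconv.DeploymentName("deploy-frontend"),
	)
}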
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the source side, and when communicating through an - // intermediary, `destination.address` SHOULD represent the destination address - // behind any intermediaries, for example proxies, if it's available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the "destination.port" - // semantic conventions. It represents the destination port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number. -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// Namespace: device -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123456789012345", "01:23:45:67:89:AB" - // Note: Its value SHOULD be identical for all apps on a device and it SHOULD - // NOT change if an app is uninstalled and re-installed. - // However, it might be resettable by the user for all apps on a device. - // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be - // used as values. - // - // More information about Android identifier best practices can be found [here] - // . - // - // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution - // > should be taken when storing personal data or anything which can identify a - // > user. GDPR and data protection laws may apply, - // > ensure you do your own due diligence.> Due to these reasons, this - // > identifier is not recommended for consumer applications and will likely - // > result in rejection from both Google Play and App Store. - // > However, it may be appropriate for specific enterprise scenarios, such as - // > kiosk devices or enterprise-managed devices, with appropriate compliance - // > clearance. - // > Any instrumentation providing this identifier MUST implement it as an - // > opt-in feature.> See [`app.installation.id`]> for a more - // > privacy-preserving alternative. - // - // [here]: https://developer.android.com/training/articles/user-data-ids - // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of the - // device manufacturer. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apple", "Samsung" - // Note: The Android OS provides this field via [Build]. 
iOS apps SHOULD - // hardcode the value `Apple`. - // - // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone3,4", "SM-G920F" - // Note: It's recommended this value represents a machine-readable version of - // the model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" - // semantic conventions. It represents the marketing name for the device model. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" - // Note: It's recommended this value represents a human-readable version of the - // device model rather than a machine-readable alternative. - DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic -// conventions. It represents a unique identifier representing the device. -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer. -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device. -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name for -// the device model. -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// Namespace: disk -const ( - // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" - // semantic conventions. It represents the disk IO operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "read" - DiskIODirectionKey = attribute.Key("disk.io.direction") -) - -// Enum values for disk.io.direction -var ( - // read - // Stability: development - DiskIODirectionRead = DiskIODirectionKey.String("read") - // write - // Stability: development - DiskIODirectionWrite = DiskIODirectionKey.String("write") -) - -// Namespace: dns -const ( - // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" - // semantic conventions. It represents the name being queried. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.example.com", "opentelemetry.io" - // Note: If the name field contains non-printable characters (below 32 or above - // 126), those characters should be represented as escaped base 10 integers - // (\DDD). Back slashes and quotes should be escaped. 
Tabs, carriage returns, - // and line feeds should be converted to \t, \r, and \n respectively. - DNSQuestionNameKey = attribute.Key("dns.question.name") -) - -// DNSQuestionName returns an attribute KeyValue conforming to the -// "dns.question.name" semantic conventions. It represents the name being -// queried. -func DNSQuestionName(val string) attribute.KeyValue { - return DNSQuestionNameKey.String(val) -} - -// Namespace: elasticsearch -const ( - // ElasticsearchNodeNameKey is the attribute Key conforming to the - // "elasticsearch.node.name" semantic conventions. It represents the represents - // the human-readable identifier of the node/instance to which a request was - // routed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-0000000001" - ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") -) - -// ElasticsearchNodeName returns an attribute KeyValue conforming to the -// "elasticsearch.node.name" semantic conventions. It represents the represents -// the human-readable identifier of the node/instance to which a request was -// routed. -func ElasticsearchNodeName(val string) attribute.KeyValue { - return ElasticsearchNodeNameKey.String(val) -} - -// Namespace: enduser -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic - // conventions. It represents the unique identifier of an end user in the - // system. It maybe a username, email address, or other identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "username" - // Note: Unique identifier of an end user in the system. - // - // > [!Warning] - // > This field contains sensitive (PII) information. - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" - // semantic conventions. It represents the pseudonymous identifier of an end - // user. This identifier should be a random value that is not directly linked or - // associated with the end user's actual identity. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "QdH5CAWJgqVT4rOr0qtumf" - // Note: Pseudonymous identifier of an end user. - // - // > [!Warning] - // > This field contains sensitive (linkable PII) information. - EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the unique identifier of an end user in -// the system. It maybe a username, email address, or other identifier. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserPseudoID returns an attribute KeyValue conforming to the -// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous -// identifier of an end user. This identifier should be a random value that is -// not directly linked or associated with the end user's actual identity. -func EnduserPseudoID(val string) attribute.KeyValue { - return EnduserPseudoIDKey.String(val) -} - -// Namespace: error -const ( - // ErrorMessageKey is the attribute Key conforming to the "error.message" - // semantic conventions. It represents a message providing more detail about an - // error in human-readable form. 
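The dns.question.name note above prescribes a specific escaping scheme for queried names. The sketch below is one possible, unofficial implementation of that rule paired with the DNSQuestionName helper defined above; the function names and any behavior beyond the quoted rule are assumptions.

package dnsinstr

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the vendored semconv version
)

// escapeDNSQuestionName applies the escaping described in the dns.question.name
// note: tab/CR/LF become \t, \r, \n; backslashes and quotes are escaped; other
// non-printable bytes (below 32 or above 126) become base-10 \DDD escapes.
func escapeDNSQuestionName(name string) string {
	var b strings.Builder
	for i := 0; i < len(name); i++ {
		c := name[i]
		switch {
		case c == '\t':
			b.WriteString(`\t`)
		case c == '\r':
			b.WriteString(`\r`)
		case c == '\n':
			b.WriteString(`\n`)
		case c == '\\' || c == '"':
			b.WriteByte('\\')
			b.WriteByte(c)
		case c < 32 || c > 126:
			fmt.Fprintf(&b, `\%03d`, c)
		default:
			b.WriteByte(c)
		}
	}
	return b.String()
}

// dnsQueryAttr pairs the escaped name with the DNSQuestionName helper above.
func dnsQueryAttr(rawName string) attribute.KeyValue {
	return semconv.DNSQuestionName(escapeDNSQuestionName(rawName))
}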
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Unexpected input type: string", "The user has exceeded their - // storage quota" - // Note: `error.message` should provide additional context and detail about an - // error. - // It is NOT RECOMMENDED to duplicate the value of `error.type` in - // `error.message`. - // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in - // `error.message`. - // - // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded - // cardinality and overlap with span status. - ErrorMessageKey = attribute.Key("error.message") - - // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic - // conventions. It represents the describes a class of error the operation ended - // with. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "timeout", "java.net.UnknownHostException", - // "server_certificate_invalid", "500" - // Note: The `error.type` SHOULD be predictable, and SHOULD have low - // cardinality. - // - // When `error.type` is set to a type (e.g., an exception type), its - // canonical class name identifying the type within the artifact SHOULD be used. - // - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library SHOULD be - // low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query time - // when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT set - // `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as HTTP - // or gRPC status codes), - // it's RECOMMENDED to: - // - // - Use a domain-specific attribute - // - Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -// ErrorMessage returns an attribute KeyValue conforming to the "error.message" -// semantic conventions. It represents a message providing more detail about an -// error in human-readable form. -func ErrorMessage(val string) attribute.KeyValue { - return ErrorMessageKey.String(val) -} - -// Enum values for error.type -var ( - // A fallback error value to be used when the instrumentation doesn't define a - // custom value. - // - // Stability: stable - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// Namespace: exception -const ( - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. 
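Given the error.type guidance above (keep cardinality low, prefer a domain-defined identifier, fall back to _OTHER), here is a hedged sketch of how an HTTP client instrumentation might apply it. The function name and the exact error mapping are assumptions; only the semconv identifiers come from this file.

package httpinstr

import (
	"context"
	"errors"
	"net"
	"strconv"

	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the vendored semconv version
	"go.opentelemetry.io/otel/trace"
)

// recordRequestError sets error.type on a failed HTTP client span: a
// domain-defined identifier (the status code) when one applies, a predictable
// type name when the error class is known, and _OTHER only as a last resort.
// It is only called when the operation did not complete successfully.
func recordRequestError(span trace.Span, statusCode int, err error) {
	var netErr *net.OpError
	switch {
	case statusCode >= 500:
		// Domain-specific identifier captured as the error class.
		span.SetAttributes(semconv.ErrorTypeKey.String(strconv.Itoa(statusCode)))
	case errors.Is(err, context.DeadlineExceeded):
		span.SetAttributes(semconv.ErrorTypeKey.String("timeout"))
	case errors.As(err, &netErr):
		// Canonical type name of the concrete error value.
		span.SetAttributes(semconv.ErrorTypeKey.String("net.OpError"))
	default:
		// Fallback when the instrumentation cannot classify the failure.
		span.SetAttributes(semconv.ErrorTypeOther)
	}
}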
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: Exception in thread "main" java.lang.RuntimeException: Test - // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at - // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at - // com.example.GenerateTrace.main(GenerateTrace.java:5) - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the exception - // should be preferred over the static type in languages that support it. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "java.net.ConnectException", "OSError" - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the "exception.type" -// semantic conventions. It represents the type of the exception (its -// fully-qualified class name, if applicable). The dynamic type of the exception -// should be preferred over the static type in languages that support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Namespace: faas -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the serverless - // function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSColdstartKey = attribute.Key("faas.coldstart") - - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron Expression]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0/5 * * * ? * - // - // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name of - // the source on which the triggering operation was performed. For example, in - // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the - // database name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myBucketName", "myDbName" - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. 
It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or S3 is - // the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "myFile.txt", "myTableName" - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the describes - // the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string containing - // the time when the data was accessed in the [ISO 8601] format expressed in - // [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a string, - // that will be potentially reused for other invocations to the same - // function/function version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" - // Note: - **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation ID of - // the current function invocation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - - // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" - // semantic conventions. It represents the name of the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: my-function - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud region of - // the invoked function. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: eu-central-1 - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. 
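A hedged sketch of an outbound FaaS invocation span using the faas.invoked_* attributes above. The tracer name, span name, and function/region literals are placeholders; the helper and enum names are the ones defined in this file.

package faasinstr

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the vendored semconv version
	"go.opentelemetry.io/otel/trace"
)

// startInvokeSpan records an outgoing call to another serverless function.
// Per the notes above, faas.invoked_name and faas.invoked_region SHOULD match
// the faas.name and cloud.region resource attributes of the invoked function.
func startInvokeSpan(ctx context.Context) (context.Context, trace.Span) {
	tracer := otel.Tracer("example/faas") // assumed tracer name
	return tracer.Start(ctx, "invoke my-function",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.FaaSInvokedName("my-function"),
			semconv.FaaSInvokedProviderAWS,
			semconv.FaaSInvokedRegion("eu-central-1"),
		),
	)
}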
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" - // semantic conventions. It represents the amount of memory available to the - // serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information (which must be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this runtime - // instance executes. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-function", "myazurefunctionapp/some-function-name" - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function.name`] - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - // - // - **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - // - // - // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes - FaaSNameKey = attribute.Key("faas.name") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation time - // in the [ISO 8601] format expressed in [UTC]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 2020-01-23T13:47:06Z - // - // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html - // [UTC]: https://www.w3.org/TR/NOTE-datetime - FaaSTimeKey = attribute.Key("faas.time") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic - // conventions. It represents the type of the trigger which caused this function - // invocation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FaaSTriggerKey = attribute.Key("faas.trigger") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic - // conventions. It represents the immutable version of the function being - // executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "26", "pinkfroid-00002" - // Note: Depending on the cloud provider and platform, use: - // - // - **AWS Lambda:** The [function version] - // (an integer represented as a decimal string). - // - **Google Cloud Run (Services):** The [revision] - // (i.e., the function name plus the revision suffix). 
- // - **Google Cloud Functions:** The value of the - // [`K_REVISION` environment variable]. - // - **Azure Functions:** Not applicable. Do not set this attribute. - // - // - // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html - // [revision]: https://cloud.google.com/run/docs/managing/revisions - // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" -// semantic conventions. It represents a boolean that is true if the serverless -// function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic -// conventions. It represents a string containing the schedule period as -// [Cron Expression]. -// -// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of the -// source on which the triggering operation was performed. For example, in Cloud -// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database -// name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 is -// the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO 8601] format expressed in -// [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" -// semantic conventions. It represents the execution environment ID as a string, -// that will be potentially reused for other invocations to the same -// function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID of -// the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. 
It represents the cloud region of -// the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic -// conventions. It represents the name of the single function that this runtime -// instance executes. -func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic -// conventions. It represents a string containing the function invocation time in -// the [ISO 8601] format expressed in [UTC]. -// -// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html -// [UTC]: https://www.w3.org/TR/NOTE-datetime -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" -// semantic conventions. It represents the immutable version of the function -// being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// Enum values for faas.document.operation -var ( - // When a new object is created. - // Stability: development - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified. - // Stability: development - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted. - // Stability: development - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Enum values for faas.invoked_provider -var ( - // Alibaba Cloud - // Stability: development - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - // Stability: development - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - // Stability: development - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - // Stability: development - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - // Stability: development - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// Enum values for faas.trigger -var ( - // A response to some data source operation such as a database or filesystem - // read/write - // Stability: development - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - // Stability: development - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - // Stability: development - FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - // Stability: development - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - // Stability: development - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Namespace: feature_flag -const ( - // FeatureFlagContextIDKey is the attribute Key conforming to the - // "feature_flag.context.id" semantic conventions. It represents the unique - // identifier for the flag evaluation context. 
For example, the targeting key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" - FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") - - // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" - // semantic conventions. It represents the lookup key of the feature flag. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "logo-color" - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider.name" semantic conventions. It represents the - // identifies the feature flag provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flag Manager" - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") - - // FeatureFlagResultReasonKey is the attribute Key conforming to the - // "feature_flag.result.reason" semantic conventions. It represents the reason - // code which shows how a feature flag value was determined. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "static", "targeting_match", "error", "default" - FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") - - // FeatureFlagResultValueKey is the attribute Key conforming to the - // "feature_flag.result.value" semantic conventions. It represents the evaluated - // value of the feature flag. - // - // Type: any - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "#ff0000", true, 3 - // Note: With some feature flag providers, feature flag results can be quite - // large or contain private or sensitive details. - // Because of this, `feature_flag.result.variant` is often the preferred - // attribute if it is available. - // - // It may be desirable to redact or otherwise limit the size and scope of - // `feature_flag.result.value` if possible. - // Because the evaluated flag value is unstructured and may be any type, it is - // left to the instrumentation author to determine how best to achieve this. - FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") - - // FeatureFlagResultVariantKey is the attribute Key conforming to the - // "feature_flag.result.variant" semantic conventions. It represents a semantic - // identifier for an evaluated flag value. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "red", "true", "on" - // Note: A semantic identifier, commonly referred to as a variant, provides a - // means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") - - // FeatureFlagSetIDKey is the attribute Key conforming to the - // "feature_flag.set.id" semantic conventions. It represents the identifier of - // the [flag set] to which the feature flag belongs. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "proj-1", "ab98sgs", "service1/dev" - // - // [flag set]: https://openfeature.dev/specification/glossary/#flag-set - FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") - - // FeatureFlagVersionKey is the attribute Key conforming to the - // "feature_flag.version" semantic conventions. It represents the version of the - // ruleset used during the evaluation. This may be any stable value which - // uniquely identifies the ruleset. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "01ABCDEF" - FeatureFlagVersionKey = attribute.Key("feature_flag.version") -) - -// FeatureFlagContextID returns an attribute KeyValue conforming to the -// "feature_flag.context.id" semantic conventions. It represents the unique -// identifier for the flag evaluation context. For example, the targeting key. -func FeatureFlagContextID(val string) attribute.KeyValue { - return FeatureFlagContextIDKey.String(val) -} - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the lookup key of the -// feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider.name" semantic conventions. It represents the -// identifies the feature flag provider. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagResultVariant returns an attribute KeyValue conforming to the -// "feature_flag.result.variant" semantic conventions. It represents a semantic -// identifier for an evaluated flag value. -func FeatureFlagResultVariant(val string) attribute.KeyValue { - return FeatureFlagResultVariantKey.String(val) -} - -// FeatureFlagSetID returns an attribute KeyValue conforming to the -// "feature_flag.set.id" semantic conventions. It represents the identifier of -// the [flag set] to which the feature flag belongs. -// -// [flag set]: https://openfeature.dev/specification/glossary/#flag-set -func FeatureFlagSetID(val string) attribute.KeyValue { - return FeatureFlagSetIDKey.String(val) -} - -// FeatureFlagVersion returns an attribute KeyValue conforming to the -// "feature_flag.version" semantic conventions. It represents the version of the -// ruleset used during the evaluation. This may be any stable value which -// uniquely identifies the ruleset. -func FeatureFlagVersion(val string) attribute.KeyValue { - return FeatureFlagVersionKey.String(val) -} - -// Enum values for feature_flag.result.reason -var ( - // The resolved value is static (no dynamic evaluation). - // Stability: development - FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") - // The resolved value fell back to a pre-configured value (no dynamic evaluation - // occurred or dynamic evaluation yielded no result). - // Stability: development - FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") - // The resolved value was the result of a dynamic evaluation, such as a rule or - // specific user-targeting. - // Stability: development - FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") - // The resolved value was the result of pseudorandom assignment. 
- // Stability: development - FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") - // The resolved value was retrieved from cache. - // Stability: development - FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") - // The resolved value was the result of the flag being disabled in the - // management system. - // Stability: development - FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") - // The reason for the resolved value could not be determined. - // Stability: development - FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") - // The resolved value is non-authoritative or possibly out of date - // Stability: development - FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") - // The resolved value was the result of an error. - // Stability: development - FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") -) - -// Namespace: file -const ( - // FileAccessedKey is the attribute Key conforming to the "file.accessed" - // semantic conventions. It represents the time when the file was last accessed, - // in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileAccessedKey = attribute.Key("file.accessed") - - // FileAttributesKey is the attribute Key conforming to the "file.attributes" - // semantic conventions. It represents the array of file attributes. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "readonly", "hidden" - // Note: Attributes names depend on the OS or file system. Here’s a - // non-exhaustive list of values expected for this attribute: `archive`, - // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, - // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, - // `write`. - FileAttributesKey = attribute.Key("file.attributes") - - // FileChangedKey is the attribute Key conforming to the "file.changed" semantic - // conventions. It represents the time when the file attributes or metadata was - // last changed, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: `file.changed` captures the time when any of the file's properties or - // attributes (including the content) are changed, while `file.modified` - // captures the timestamp when the file content is modified. - FileChangedKey = attribute.Key("file.changed") - - // FileCreatedKey is the attribute Key conforming to the "file.created" semantic - // conventions. It represents the time when the file was created, in ISO 8601 - // format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - // Note: This attribute might not be supported by some file systems — NFS, - // FAT32, in embedded OS, etc. - FileCreatedKey = attribute.Key("file.created") - - // FileDirectoryKey is the attribute Key conforming to the "file.directory" - // semantic conventions. It represents the directory where the file is located. - // It should include the drive letter, when appropriate. 
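A hedged sketch of how the feature_flag.* helpers above might describe a single flag evaluation. The provider, flag key, and variant literals are placeholders; as the feature_flag.result.value note suggests, the sketch records the variant rather than the raw evaluated value.

package ffinstr

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumption: adjust to the vendored semconv version
)

// evaluationAttributes describes one flag evaluation using the attributes
// defined above, keyed by the evaluation context's targeting key.
func evaluationAttributes(targetingKey string) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FeatureFlagProviderName("Flag Manager"), // placeholder provider
		semconv.FeatureFlagKey("logo-color"),            // placeholder flag key
		semconv.FeatureFlagContextID(targetingKey),
		semconv.FeatureFlagResultVariant("red"), // variant instead of the raw value
		semconv.FeatureFlagResultReasonTargetingMatch,
	}
}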
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/user", "C:\Program Files\MyApp" - FileDirectoryKey = attribute.Key("file.directory") - - // FileExtensionKey is the attribute Key conforming to the "file.extension" - // semantic conventions. It represents the file extension, excluding the leading - // dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: When the file name has multiple extensions (example.tar.gz), only the - // last one should be captured ("gz", not "tar.gz"). - FileExtensionKey = attribute.Key("file.extension") - - // FileForkNameKey is the attribute Key conforming to the "file.fork_name" - // semantic conventions. It represents the name of the fork. A fork is - // additional data associated with a filesystem object. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Zone.Identifer" - // Note: On Linux, a resource fork is used to store additional data with a - // filesystem object. A file always has at least one fork for the data portion, - // and additional forks may exist. - // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default - // data stream for a file is just called $DATA. Zone.Identifier is commonly used - // by Windows to track contents downloaded from the Internet. An ADS is - // typically of the form: C:\path\to\filename.extension:some_fork_name, and - // some_fork_name is the value that should populate `fork_name`. - // `filename.extension` should populate `file.name`, and `extension` should - // populate `file.extension`. The full path, `file.path`, will include the fork - // name. - FileForkNameKey = attribute.Key("file.fork_name") - - // FileGroupIDKey is the attribute Key conforming to the "file.group.id" - // semantic conventions. It represents the primary Group ID (GID) of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileGroupIDKey = attribute.Key("file.group.id") - - // FileGroupNameKey is the attribute Key conforming to the "file.group.name" - // semantic conventions. It represents the primary group name of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "users" - FileGroupNameKey = attribute.Key("file.group.name") - - // FileInodeKey is the attribute Key conforming to the "file.inode" semantic - // conventions. It represents the inode representing the file in the filesystem. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "256383" - FileInodeKey = attribute.Key("file.inode") - - // FileModeKey is the attribute Key conforming to the "file.mode" semantic - // conventions. It represents the mode of the file in octal representation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0640" - FileModeKey = attribute.Key("file.mode") - - // FileModifiedKey is the attribute Key conforming to the "file.modified" - // semantic conventions. It represents the time when the file content was last - // modified, in ISO 8601 format. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T12:00:00Z" - FileModifiedKey = attribute.Key("file.modified") - - // FileNameKey is the attribute Key conforming to the "file.name" semantic - // conventions. It represents the name of the file including the extension, - // without the directory. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.png" - FileNameKey = attribute.Key("file.name") - - // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" - // semantic conventions. It represents the user ID (UID) or security identifier - // (SID) of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1000" - FileOwnerIDKey = attribute.Key("file.owner.id") - - // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" - // semantic conventions. It represents the username of the file owner. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - FileOwnerNameKey = attribute.Key("file.owner.name") - - // FilePathKey is the attribute Key conforming to the "file.path" semantic - // conventions. It represents the full path to the file, including the file - // name. It should include the drive letter, when appropriate. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" - FilePathKey = attribute.Key("file.path") - - // FileSizeKey is the attribute Key conforming to the "file.size" semantic - // conventions. It represents the file size in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - FileSizeKey = attribute.Key("file.size") - - // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the - // "file.symbolic_link.target_path" semantic conventions. It represents the path - // to the target of a symbolic link. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/python3" - // Note: This attribute is only applicable to symbolic links. - FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") -) - -// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" -// semantic conventions. It represents the time when the file was last accessed, -// in ISO 8601 format. -func FileAccessed(val string) attribute.KeyValue { - return FileAccessedKey.String(val) -} - -// FileAttributes returns an attribute KeyValue conforming to the -// "file.attributes" semantic conventions. It represents the array of file -// attributes. -func FileAttributes(val ...string) attribute.KeyValue { - return FileAttributesKey.StringSlice(val) -} - -// FileChanged returns an attribute KeyValue conforming to the "file.changed" -// semantic conventions. It represents the time when the file attributes or -// metadata was last changed, in ISO 8601 format. -func FileChanged(val string) attribute.KeyValue { - return FileChangedKey.String(val) -} - -// FileCreated returns an attribute KeyValue conforming to the "file.created" -// semantic conventions. It represents the time when the file was created, in ISO -// 8601 format. 
-func FileCreated(val string) attribute.KeyValue { - return FileCreatedKey.String(val) -} - -// FileDirectory returns an attribute KeyValue conforming to the "file.directory" -// semantic conventions. It represents the directory where the file is located. -// It should include the drive letter, when appropriate. -func FileDirectory(val string) attribute.KeyValue { - return FileDirectoryKey.String(val) -} - -// FileExtension returns an attribute KeyValue conforming to the "file.extension" -// semantic conventions. It represents the file extension, excluding the leading -// dot. -func FileExtension(val string) attribute.KeyValue { - return FileExtensionKey.String(val) -} - -// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" -// semantic conventions. It represents the name of the fork. A fork is additional -// data associated with a filesystem object. -func FileForkName(val string) attribute.KeyValue { - return FileForkNameKey.String(val) -} - -// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" -// semantic conventions. It represents the primary Group ID (GID) of the file. -func FileGroupID(val string) attribute.KeyValue { - return FileGroupIDKey.String(val) -} - -// FileGroupName returns an attribute KeyValue conforming to the -// "file.group.name" semantic conventions. It represents the primary group name -// of the file. -func FileGroupName(val string) attribute.KeyValue { - return FileGroupNameKey.String(val) -} - -// FileInode returns an attribute KeyValue conforming to the "file.inode" -// semantic conventions. It represents the inode representing the file in the -// filesystem. -func FileInode(val string) attribute.KeyValue { - return FileInodeKey.String(val) -} - -// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic -// conventions. It represents the mode of the file in octal representation. -func FileMode(val string) attribute.KeyValue { - return FileModeKey.String(val) -} - -// FileModified returns an attribute KeyValue conforming to the "file.modified" -// semantic conventions. It represents the time when the file content was last -// modified, in ISO 8601 format. -func FileModified(val string) attribute.KeyValue { - return FileModifiedKey.String(val) -} - -// FileName returns an attribute KeyValue conforming to the "file.name" semantic -// conventions. It represents the name of the file including the extension, -// without the directory. -func FileName(val string) attribute.KeyValue { - return FileNameKey.String(val) -} - -// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" -// semantic conventions. It represents the user ID (UID) or security identifier -// (SID) of the file owner. -func FileOwnerID(val string) attribute.KeyValue { - return FileOwnerIDKey.String(val) -} - -// FileOwnerName returns an attribute KeyValue conforming to the -// "file.owner.name" semantic conventions. It represents the username of the file -// owner. -func FileOwnerName(val string) attribute.KeyValue { - return FileOwnerNameKey.String(val) -} - -// FilePath returns an attribute KeyValue conforming to the "file.path" semantic -// conventions. It represents the full path to the file, including the file name. -// It should include the drive letter, when appropriate. -func FilePath(val string) attribute.KeyValue { - return FilePathKey.String(val) -} - -// FileSize returns an attribute KeyValue conforming to the "file.size" semantic -// conventions. It represents the file size in bytes. 
-func FileSize(val int) attribute.KeyValue { - return FileSizeKey.Int(val) -} - -// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the -// "file.symbolic_link.target_path" semantic conventions. It represents the path -// to the target of a symbolic link. -func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { - return FileSymbolicLinkTargetPathKey.String(val) -} - -// Namespace: gcp -const ( - // GCPAppHubApplicationContainerKey is the attribute Key conforming to the - // "gcp.apphub.application.container" semantic conventions. It represents the - // container within GCP where the AppHub application is defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "projects/my-container-project" - GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") - - // GCPAppHubApplicationIDKey is the attribute Key conforming to the - // "gcp.apphub.application.id" semantic conventions. It represents the name of - // the application as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-application" - GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") - - // GCPAppHubApplicationLocationKey is the attribute Key conforming to the - // "gcp.apphub.application.location" semantic conventions. It represents the GCP - // zone or region where the application is defined. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "us-central1" - GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") - - // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.criticality_type" semantic conventions. It represents the - // criticality of a service indicates its importance to the business. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") - - // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.service.environment_type" semantic conventions. It represents the - // environment of a service is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") - - // GCPAppHubServiceIDKey is the attribute Key conforming to the - // "gcp.apphub.service.id" semantic conventions. It represents the name of the - // service as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-service" - GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") - - // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.criticality_type" semantic conventions. It represents - // the criticality of a workload indicates its importance to the business. 
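// A minimal usage sketch for the file.* helpers above: each one returns an
// attribute.KeyValue, so file metadata can be attached to a span via the
// standard OpenTelemetry trace API. The span variable (a trace.Span from
// go.opentelemetry.io/otel/trace) is an assumption for illustration, the
// helpers are shown unqualified as they are defined in this file, and the
// literal values are taken from the examples above.
//
//	span.SetAttributes(
//		FilePath("/home/alice/example.png"),
//		FileName("example.png"),
//		FileExtension("png"),
//		FileSize(1024),
//	)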
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub type enum] - // - // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type - GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") - - // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the - // "gcp.apphub.workload.environment_type" semantic conventions. It represents - // the environment of a workload is the stage of a software lifecycle. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: [See AppHub environment type] - // - // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 - GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") - - // GCPAppHubWorkloadIDKey is the attribute Key conforming to the - // "gcp.apphub.workload.id" semantic conventions. It represents the name of the - // workload as configured in AppHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-workload" - GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") - - // GCPClientServiceKey is the attribute Key conforming to the - // "gcp.client.service" semantic conventions. It represents the identifies the - // Google Cloud service for which the official client library is intended. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "appengine", "run", "firestore", "alloydb", "spanner" - // Note: Intended to be a stable identifier for Google Cloud client libraries - // that is uniform across implementation languages. The value should be derived - // from the canonical service domain for the service; for example, - // 'foo.googleapis.com' should result in a value of 'foo'. - GCPClientServiceKey = attribute.Key("gcp.client.service") - - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of - // the Cloud Run [execution] being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`] environment variable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "job-name-xxxx", "sample-job-mdw84" - // - // [execution]: https://cloud.google.com/run/docs/managing/job-executions - // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index - // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] - // environment variable. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1 - // - // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") - - // GCPGCEInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname - // of a GCE instance. This is the full value of the default or [custom hostname] - // . 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-host1234.example.com", - // "sample-vm.us-west1-b.c.my-project.internal" - // - // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm - GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGCEInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance name - // of a GCE instance. This is the value provided by `host.name`, the visible - // name of the instance in the Cloud Console UI, and the prefix for the default - // hostname of the instance as defined by the [default internal DNS name]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "instance-1", "my-vm-name" - // - // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names - GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the -// "gcp.apphub.application.container" semantic conventions. It represents the -// container within GCP where the AppHub application is defined. -func GCPAppHubApplicationContainer(val string) attribute.KeyValue { - return GCPAppHubApplicationContainerKey.String(val) -} - -// GCPAppHubApplicationID returns an attribute KeyValue conforming to the -// "gcp.apphub.application.id" semantic conventions. It represents the name of -// the application as configured in AppHub. -func GCPAppHubApplicationID(val string) attribute.KeyValue { - return GCPAppHubApplicationIDKey.String(val) -} - -// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the -// "gcp.apphub.application.location" semantic conventions. It represents the GCP -// zone or region where the application is defined. -func GCPAppHubApplicationLocation(val string) attribute.KeyValue { - return GCPAppHubApplicationLocationKey.String(val) -} - -// GCPAppHubServiceID returns an attribute KeyValue conforming to the -// "gcp.apphub.service.id" semantic conventions. It represents the name of the -// service as configured in AppHub. -func GCPAppHubServiceID(val string) attribute.KeyValue { - return GCPAppHubServiceIDKey.String(val) -} - -// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the -// "gcp.apphub.workload.id" semantic conventions. It represents the name of the -// workload as configured in AppHub. -func GCPAppHubWorkloadID(val string) attribute.KeyValue { - return GCPAppHubWorkloadIDKey.String(val) -} - -// GCPClientService returns an attribute KeyValue conforming to the -// "gcp.client.service" semantic conventions. It represents the identifies the -// Google Cloud service for which the official client library is intended. -func GCPClientService(val string) attribute.KeyValue { - return GCPClientServiceKey.String(val) -} - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of -// the Cloud Run [execution] being run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`] environment variable. 
-// -// [execution]: https://cloud.google.com/run/docs/managing/job-executions -// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] -// environment variable. -// -// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom hostname] -// . -// -// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm -func GCPGCEInstanceHostname(val string) attribute.KeyValue { - return GCPGCEInstanceHostnameKey.String(val) -} - -// GCPGCEInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance name -// of a GCE instance. This is the value provided by `host.name`, the visible name -// of the instance in the Cloud Console UI, and the prefix for the default -// hostname of the instance as defined by the [default internal DNS name]. -// -// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names -func GCPGCEInstanceName(val string) attribute.KeyValue { - return GCPGCEInstanceNameKey.String(val) -} - -// Enum values for gcp.apphub.service.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. - // Stability: development - GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.service.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Enum values for gcp.apphub.workload.criticality_type -var ( - // Mission critical service. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") - // High impact. 
- // Stability: development - GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") - // Medium impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") - // Low impact. - // Stability: development - GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") -) - -// Enum values for gcp.apphub.workload.environment_type -var ( - // Production environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") - // Staging environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") - // Test environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") - // Development environment. - // Stability: development - GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") -) - -// Namespace: gen_ai -const ( - // GenAIAgentDescriptionKey is the attribute Key conforming to the - // "gen_ai.agent.description" semantic conventions. It represents the free-form - // description of the GenAI agent provided by the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Helps with math problems", "Generates fiction stories" - GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") - - // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" - // semantic conventions. It represents the unique identifier of the GenAI agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" - GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") - - // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" - // semantic conventions. It represents the human-readable name of the GenAI - // agent provided by the application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Math Tutor", "Fiction Writer" - GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") - - // GenAIConversationIDKey is the attribute Key conforming to the - // "gen_ai.conversation.id" semantic conventions. It represents the unique - // identifier for a conversation (session, thread), used to store and correlate - // messages within this conversation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" - GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") - - // GenAIDataSourceIDKey is the attribute Key conforming to the - // "gen_ai.data_source.id" semantic conventions. It represents the data source - // identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "H7STPQYOND" - // Note: Data sources are used by AI agents and RAG applications to store - // grounding data. A data source may be an external database, object store, - // document collection, website, or any other storage system used by the GenAI - // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier - // used by the GenAI system rather than a name specific to the external storage, - // such as a database or object store. 
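// A minimal usage sketch for the gcp.* helpers and enum values above, assuming
// span is an active trace.Span recorded inside a Cloud Run job; the literal
// values are taken from the examples above.
//
//	span.SetAttributes(
//		GCPCloudRunJobExecution("job-name-xxxx"),
//		GCPCloudRunJobTaskIndex(0),
//		GCPAppHubServiceID("my-service"),
//		GCPAppHubServiceCriticalityTypeHigh,
//	)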
- // Semantic conventions referencing `gen_ai.data_source.id` MAY also leverage
- // additional attributes, such as `db.*`, to further identify and describe the
- // data source.
- GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
-
- // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
- // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
- // service tier requested. May be a specific tier, default, or auto.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "auto", "default"
- GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
-
- // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
- // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
- // service tier used for the response.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "scale", "default"
- GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
-
- // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
- // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
- // represents a fingerprint to track any eventual change in the Generative AI
- // environment.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "fp_44709d6fcb"
- GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
-
- // GenAIOperationNameKey is the attribute Key conforming to the
- // "gen_ai.operation.name" semantic conventions. It represents the name of the
- // operation being performed.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // Note: If one of the predefined values applies, but a specific system uses a
- // different name, it's RECOMMENDED to document it in the semantic conventions
- // for that GenAI system and to use the system-specific name in the
- // instrumentation. If a different name is not documented, instrumentation
- // libraries SHOULD use the applicable predefined value.
- GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
-
- // GenAIOutputTypeKey is the attribute Key conforming to the
- // "gen_ai.output.type" semantic conventions. It represents the content type
- // requested by the client.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // Note: This attribute SHOULD be used when the client requests output of a
- // specific type. The model may return zero or more outputs of this type.
- // This attribute specifies the output modality and not the actual output
- // format. For example, if an image is requested, the actual output could be a
- // URL pointing to an image file.
- // Additional output format details may be recorded in the future in the
- // `gen_ai.output.{type}.*` attributes.
- GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
-
- // GenAIRequestChoiceCountKey is the attribute Key conforming to the
- // "gen_ai.request.choice.count" semantic conventions. It represents the target
- // number of candidate completions to return.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 3
- GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
-
- // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
- // "gen_ai.request.encoding_formats" semantic conventions. It represents the
- // encoding formats requested in an embeddings operation, if specified.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: ["base64"], ["float", "binary"]
- // Note: In some GenAI systems the encoding formats are called embedding types.
- // Also, some GenAI systems only accept a single format per request.
- GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
-
- // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
- // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
- // frequency penalty setting for the GenAI request.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 0.1
- GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
-
- // GenAIRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
- // number of tokens the model generates for a request.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 100
- GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAIRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of the
- // GenAI model a request is being made to.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "gpt-4"
- GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
- // "gen_ai.request.presence_penalty" semantic conventions. It represents the
- // presence penalty setting for the GenAI request.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 0.1
- GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
-
- // GenAIRequestSeedKey is the attribute Key conforming to the
- // "gen_ai.request.seed" semantic conventions. It represents the request seed;
- // requests with the same seed value are more likely to return the same result.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 100
- GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
-
- // GenAIRequestStopSequencesKey is the attribute Key conforming to the
- // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
- // of sequences that the model will use to stop generating further tokens.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "forest", "lived"
- GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
-
- // GenAIRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the GenAI request.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 0.0
- GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAIRequestTopKKey is the attribute Key conforming to the
- // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
- // setting for the GenAI request.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1.0
- GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
-
- // GenAIRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
- // setting for the GenAI request.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1.0
- GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to each
- // generation received.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: ["stop"], ["stop", "length"]
- GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAIResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "chatcmpl-123"
- GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAIResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of the
- // model that generated the response.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "gpt-4-0613"
- GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as identified
- // by the client or server instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "openai"
- // Note: The `gen_ai.system` describes a family of GenAI models with the
- // specific model identified by the `gen_ai.request.model` and
- // `gen_ai.response.model` attributes.
- //
- // The actual GenAI product may differ from the one identified by the client.
- // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI
- // client libraries. In such cases, the `gen_ai.system` is set to `openai` based
- // on the instrumentation's best knowledge, instead of the actual system. The
- // `server.address` attribute may help identify the actual system in use for
- // `openai`.
- //
- // For custom models, a custom friendly name SHOULD be used. If none of these
- // options apply, the `gen_ai.system` SHOULD be set to `_OTHER`.
- GenAISystemKey = attribute.Key("gen_ai.system")
-
- // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
- // semantic conventions. It represents the type of token being counted.
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "input", "output" - GenAITokenTypeKey = attribute.Key("gen_ai.token.type") - - // GenAIToolCallIDKey is the attribute Key conforming to the - // "gen_ai.tool.call.id" semantic conventions. It represents the tool call - // identifier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" - GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") - - // GenAIToolDescriptionKey is the attribute Key conforming to the - // "gen_ai.tool.description" semantic conventions. It represents the tool - // description. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Multiply two numbers" - GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") - - // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" - // semantic conventions. It represents the name of the tool utilized by the - // agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Flights" - GenAIToolNameKey = attribute.Key("gen_ai.tool.name") - - // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" - // semantic conventions. It represents the type of the tool utilized by the - // agent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "function", "extension", "datastore" - // Note: Extension: A tool executed on the agent-side to directly call external - // APIs, bridging the gap between the agent and real-world systems. - // Agent-side operations involve actions that are performed by the agent on the - // server or within the agent's controlled environment. - // Function: A tool executed on the client-side, where the agent generates - // parameters for a predefined function, and the client executes the logic. - // Client-side operations are actions taken on the user's end or within the - // client application. - // Datastore: A tool used by the agent to access and query structured or - // unstructured external data for retrieval-augmented tasks or knowledge - // updates. - GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") - - // GenAIUsageInputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of - // tokens used in the GenAI input (prompt). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 100 - GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") - - // GenAIUsageOutputTokensKey is the attribute Key conforming to the - // "gen_ai.usage.output_tokens" semantic conventions. It represents the number - // of tokens used in the GenAI response (completion). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 180 - GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") -) - -// GenAIAgentDescription returns an attribute KeyValue conforming to the -// "gen_ai.agent.description" semantic conventions. It represents the free-form -// description of the GenAI agent provided by the application. -func GenAIAgentDescription(val string) attribute.KeyValue { - return GenAIAgentDescriptionKey.String(val) -} - -// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" -// semantic conventions. 
It represents the unique identifier of the GenAI agent. -func GenAIAgentID(val string) attribute.KeyValue { - return GenAIAgentIDKey.String(val) -} - -// GenAIAgentName returns an attribute KeyValue conforming to the -// "gen_ai.agent.name" semantic conventions. It represents the human-readable -// name of the GenAI agent provided by the application. -func GenAIAgentName(val string) attribute.KeyValue { - return GenAIAgentNameKey.String(val) -} - -// GenAIConversationID returns an attribute KeyValue conforming to the -// "gen_ai.conversation.id" semantic conventions. It represents the unique -// identifier for a conversation (session, thread), used to store and correlate -// messages within this conversation. -func GenAIConversationID(val string) attribute.KeyValue { - return GenAIConversationIDKey.String(val) -} - -// GenAIDataSourceID returns an attribute KeyValue conforming to the -// "gen_ai.data_source.id" semantic conventions. It represents the data source -// identifier. -func GenAIDataSourceID(val string) attribute.KeyValue { - return GenAIDataSourceIDKey.String(val) -} - -// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the -// "gen_ai.openai.response.service_tier" semantic conventions. It represents the -// service tier used for the response. -func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { - return GenAIOpenAIResponseServiceTierKey.String(val) -} - -// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming -// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It -// represents a fingerprint to track any eventual change in the Generative AI -// environment. -func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { - return GenAIOpenAIResponseSystemFingerprintKey.String(val) -} - -// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the -// "gen_ai.request.choice.count" semantic conventions. It represents the target -// number of candidate completions to return. -func GenAIRequestChoiceCount(val int) attribute.KeyValue { - return GenAIRequestChoiceCountKey.Int(val) -} - -// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the -// "gen_ai.request.encoding_formats" semantic conventions. It represents the -// encoding formats requested in an embeddings operation, if specified. -func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { - return GenAIRequestEncodingFormatsKey.StringSlice(val) -} - -// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.frequency_penalty" semantic conventions. It represents the -// frequency penalty setting for the GenAI request. -func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { - return GenAIRequestFrequencyPenaltyKey.Float64(val) -} - -// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the -// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum -// number of tokens the model generates for a request. -func GenAIRequestMaxTokens(val int) attribute.KeyValue { - return GenAIRequestMaxTokensKey.Int(val) -} - -// GenAIRequestModel returns an attribute KeyValue conforming to the -// "gen_ai.request.model" semantic conventions. It represents the name of the -// GenAI model a request is being made to. 
-func GenAIRequestModel(val string) attribute.KeyValue { - return GenAIRequestModelKey.String(val) -} - -// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the -// "gen_ai.request.presence_penalty" semantic conventions. It represents the -// presence penalty setting for the GenAI request. -func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { - return GenAIRequestPresencePenaltyKey.Float64(val) -} - -// GenAIRequestSeed returns an attribute KeyValue conforming to the -// "gen_ai.request.seed" semantic conventions. It represents the requests with -// same seed value more likely to return same result. -func GenAIRequestSeed(val int) attribute.KeyValue { - return GenAIRequestSeedKey.Int(val) -} - -// GenAIRequestStopSequences returns an attribute KeyValue conforming to the -// "gen_ai.request.stop_sequences" semantic conventions. It represents the list -// of sequences that the model will use to stop generating further tokens. -func GenAIRequestStopSequences(val ...string) attribute.KeyValue { - return GenAIRequestStopSequencesKey.StringSlice(val) -} - -// GenAIRequestTemperature returns an attribute KeyValue conforming to the -// "gen_ai.request.temperature" semantic conventions. It represents the -// temperature setting for the GenAI request. -func GenAIRequestTemperature(val float64) attribute.KeyValue { - return GenAIRequestTemperatureKey.Float64(val) -} - -// GenAIRequestTopK returns an attribute KeyValue conforming to the -// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling -// setting for the GenAI request. -func GenAIRequestTopK(val float64) attribute.KeyValue { - return GenAIRequestTopKKey.Float64(val) -} - -// GenAIRequestTopP returns an attribute KeyValue conforming to the -// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling -// setting for the GenAI request. -func GenAIRequestTopP(val float64) attribute.KeyValue { - return GenAIRequestTopPKey.Float64(val) -} - -// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the -// "gen_ai.response.finish_reasons" semantic conventions. It represents the array -// of reasons the model stopped generating tokens, corresponding to each -// generation received. -func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { - return GenAIResponseFinishReasonsKey.StringSlice(val) -} - -// GenAIResponseID returns an attribute KeyValue conforming to the -// "gen_ai.response.id" semantic conventions. It represents the unique identifier -// for the completion. -func GenAIResponseID(val string) attribute.KeyValue { - return GenAIResponseIDKey.String(val) -} - -// GenAIResponseModel returns an attribute KeyValue conforming to the -// "gen_ai.response.model" semantic conventions. It represents the name of the -// model that generated the response. -func GenAIResponseModel(val string) attribute.KeyValue { - return GenAIResponseModelKey.String(val) -} - -// GenAIToolCallID returns an attribute KeyValue conforming to the -// "gen_ai.tool.call.id" semantic conventions. It represents the tool call -// identifier. -func GenAIToolCallID(val string) attribute.KeyValue { - return GenAIToolCallIDKey.String(val) -} - -// GenAIToolDescription returns an attribute KeyValue conforming to the -// "gen_ai.tool.description" semantic conventions. It represents the tool -// description. 
-func GenAIToolDescription(val string) attribute.KeyValue { - return GenAIToolDescriptionKey.String(val) -} - -// GenAIToolName returns an attribute KeyValue conforming to the -// "gen_ai.tool.name" semantic conventions. It represents the name of the tool -// utilized by the agent. -func GenAIToolName(val string) attribute.KeyValue { - return GenAIToolNameKey.String(val) -} - -// GenAIToolType returns an attribute KeyValue conforming to the -// "gen_ai.tool.type" semantic conventions. It represents the type of the tool -// utilized by the agent. -func GenAIToolType(val string) attribute.KeyValue { - return GenAIToolTypeKey.String(val) -} - -// GenAIUsageInputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI input (prompt). -func GenAIUsageInputTokens(val int) attribute.KeyValue { - return GenAIUsageInputTokensKey.Int(val) -} - -// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the -// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of -// tokens used in the GenAI response (completion). -func GenAIUsageOutputTokens(val int) attribute.KeyValue { - return GenAIUsageOutputTokensKey.Int(val) -} - -// Enum values for gen_ai.openai.request.service_tier -var ( - // The system will utilize scale tier credits until they are exhausted. - // Stability: development - GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") - // The system will utilize the default scale tier. - // Stability: development - GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") -) - -// Enum values for gen_ai.operation.name -var ( - // Chat completion operation such as [OpenAI Chat API] - // Stability: development - // - // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat - GenAIOperationNameChat = GenAIOperationNameKey.String("chat") - // Multimodal content generation operation such as [Gemini Generate Content] - // Stability: development - // - // [Gemini Generate Content]: https://ai.google.dev/api/generate-content - GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") - // Text completions operation such as [OpenAI Completions API (Legacy)] - // Stability: development - // - // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions - GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") - // Embeddings operation such as [OpenAI Create embeddings API] - // Stability: development - // - // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create - GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") - // Create GenAI agent - // Stability: development - GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") - // Invoke GenAI agent - // Stability: development - GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") - // Execute a tool - // Stability: development - GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") -) - -// Enum values for gen_ai.output.type -var ( - // Plain text - // Stability: development - GenAIOutputTypeText = GenAIOutputTypeKey.String("text") - // JSON object with known or unknown schema - // Stability: development - GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") - // Image - // Stability: development - 
GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") - // Speech - // Stability: development - GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") -) - -// Enum values for gen_ai.system -var ( - // OpenAI - // Stability: development - GenAISystemOpenAI = GenAISystemKey.String("openai") - // Any Google generative AI endpoint - // Stability: development - GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") - // Vertex AI - // Stability: development - GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") - // Gemini - // Stability: development - GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") - // Deprecated: Use 'gcp.vertex_ai' instead. - GenAISystemVertexAI = GenAISystemKey.String("vertex_ai") - // Deprecated: Use 'gcp.gemini' instead. - GenAISystemGemini = GenAISystemKey.String("gemini") - // Anthropic - // Stability: development - GenAISystemAnthropic = GenAISystemKey.String("anthropic") - // Cohere - // Stability: development - GenAISystemCohere = GenAISystemKey.String("cohere") - // Azure AI Inference - // Stability: development - GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference") - // Azure OpenAI - // Stability: development - GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai") - // IBM Watsonx AI - // Stability: development - GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") - // AWS Bedrock - // Stability: development - GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") - // Perplexity - // Stability: development - GenAISystemPerplexity = GenAISystemKey.String("perplexity") - // xAI - // Stability: development - GenAISystemXai = GenAISystemKey.String("xai") - // DeepSeek - // Stability: development - GenAISystemDeepseek = GenAISystemKey.String("deepseek") - // Groq - // Stability: development - GenAISystemGroq = GenAISystemKey.String("groq") - // Mistral AI - // Stability: development - GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") -) - -// Enum values for gen_ai.token.type -var ( - // Input tokens (prompt, input, etc.) - // Stability: development - GenAITokenTypeInput = GenAITokenTypeKey.String("input") - // Deprecated: Replaced by `output`. - GenAITokenTypeCompletion = GenAITokenTypeKey.String("output") - // Output tokens (completion, response, etc.) - // Stability: development - GenAITokenTypeOutput = GenAITokenTypeKey.String("output") -) - -// Namespace: geo -const ( - // GeoContinentCodeKey is the attribute Key conforming to the - // "geo.continent.code" semantic conventions. It represents the two-letter code - // representing continent’s name. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - GeoContinentCodeKey = attribute.Key("geo.continent.code") - - // GeoCountryISOCodeKey is the attribute Key conforming to the - // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO - // Country Code ([ISO 3166-1 alpha2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA" - // - // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes - GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") - - // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" - // semantic conventions. It represents the locality name. Represents the name of - // a city, town, village, or similar populated place. 
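// A minimal usage sketch for the gen_ai.* helpers and enum values above,
// assuming span is an active trace.Span wrapping a chat completion call; the
// literal values are taken from the examples above.
//
//	span.SetAttributes(
//		GenAIOperationNameChat,
//		GenAISystemOpenAI,
//		GenAIRequestModel("gpt-4"),
//		GenAIRequestMaxTokens(100),
//		GenAIResponseModel("gpt-4-0613"),
//		GenAIUsageInputTokens(100),
//		GenAIUsageOutputTokens(180),
//	)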
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Montreal", "Berlin" - GeoLocalityNameKey = attribute.Key("geo.locality.name") - - // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" - // semantic conventions. It represents the latitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 45.505918 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLatKey = attribute.Key("geo.location.lat") - - // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" - // semantic conventions. It represents the longitude of the geo location in - // [WGS84]. - // - // Type: double - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -73.61483 - // - // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 - GeoLocationLonKey = attribute.Key("geo.location.lon") - - // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" - // semantic conventions. It represents the postal code associated with the - // location. Values appropriate for this field may also be known as a postcode - // or ZIP code and will vary widely from country to country. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "94040" - GeoPostalCodeKey = attribute.Key("geo.postal_code") - - // GeoRegionISOCodeKey is the attribute Key conforming to the - // "geo.region.iso_code" semantic conventions. It represents the region ISO code - // ([ISO 3166-2]). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CA-QC" - // - // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 - GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") -) - -// GeoCountryISOCode returns an attribute KeyValue conforming to the -// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO -// Country Code ([ISO 3166-1 alpha2]). -// -// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes -func GeoCountryISOCode(val string) attribute.KeyValue { - return GeoCountryISOCodeKey.String(val) -} - -// GeoLocalityName returns an attribute KeyValue conforming to the -// "geo.locality.name" semantic conventions. It represents the locality name. -// Represents the name of a city, town, village, or similar populated place. -func GeoLocalityName(val string) attribute.KeyValue { - return GeoLocalityNameKey.String(val) -} - -// GeoLocationLat returns an attribute KeyValue conforming to the -// "geo.location.lat" semantic conventions. It represents the latitude of the geo -// location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLat(val float64) attribute.KeyValue { - return GeoLocationLatKey.Float64(val) -} - -// GeoLocationLon returns an attribute KeyValue conforming to the -// "geo.location.lon" semantic conventions. It represents the longitude of the -// geo location in [WGS84]. -// -// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 -func GeoLocationLon(val float64) attribute.KeyValue { - return GeoLocationLonKey.Float64(val) -} - -// GeoPostalCode returns an attribute KeyValue conforming to the -// "geo.postal_code" semantic conventions. It represents the postal code -// associated with the location. 
Values appropriate for this field may also be -// known as a postcode or ZIP code and will vary widely from country to country. -func GeoPostalCode(val string) attribute.KeyValue { - return GeoPostalCodeKey.String(val) -} - -// GeoRegionISOCode returns an attribute KeyValue conforming to the -// "geo.region.iso_code" semantic conventions. It represents the region ISO code -// ([ISO 3166-2]). -// -// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 -func GeoRegionISOCode(val string) attribute.KeyValue { - return GeoRegionISOCodeKey.String(val) -} - -// Enum values for geo.continent.code -var ( - // Africa - // Stability: development - GeoContinentCodeAf = GeoContinentCodeKey.String("AF") - // Antarctica - // Stability: development - GeoContinentCodeAn = GeoContinentCodeKey.String("AN") - // Asia - // Stability: development - GeoContinentCodeAs = GeoContinentCodeKey.String("AS") - // Europe - // Stability: development - GeoContinentCodeEu = GeoContinentCodeKey.String("EU") - // North America - // Stability: development - GeoContinentCodeNa = GeoContinentCodeKey.String("NA") - // Oceania - // Stability: development - GeoContinentCodeOc = GeoContinentCodeKey.String("OC") - // South America - // Stability: development - GeoContinentCodeSa = GeoContinentCodeKey.String("SA") -) - -// Namespace: go -const ( - // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" - // semantic conventions. It represents the type of memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "other", "stack" - GoMemoryTypeKey = attribute.Key("go.memory.type") -) - -// Enum values for go.memory.type -var ( - // Memory allocated from the heap that is reserved for stack space, whether or - // not it is currently in-use. - // Stability: development - GoMemoryTypeStack = GoMemoryTypeKey.String("stack") - // Memory used by the Go runtime, excluding other categories of memory usage - // described in this enumeration. - // Stability: development - GoMemoryTypeOther = GoMemoryTypeKey.String("other") -) - -// Namespace: graphql -const ( - // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" - // semantic conventions. It represents the GraphQL document being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: query findBookById { bookById(id: ?) { name } } - // Note: The value may be sanitized to exclude sensitive information. - GraphQLDocumentKey = attribute.Key("graphql.document") - - // GraphQLOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of the - // operation being executed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: findBookById - GraphQLOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphQLOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of the - // operation being executed. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "query", "mutation", "subscription" - GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") -) - -// GraphQLDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. 
-func GraphQLDocument(val string) attribute.KeyValue { - return GraphQLDocumentKey.String(val) -} - -// GraphQLOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphQLOperationName(val string) attribute.KeyValue { - return GraphQLOperationNameKey.String(val) -} - -// Enum values for graphql.operation.type -var ( - // GraphQL query - // Stability: development - GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") - // GraphQL mutation - // Stability: development - GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") - // GraphQL subscription - // Stability: development - GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") -) - -// Namespace: heroku -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit hash - // for the current release. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents the - // time and date the release was created. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2022-10-23T18:00:42Z" - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" -// semantic conventions. It represents the unique identifier for the application. -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release. -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the -// "heroku.release.creation_timestamp" semantic conventions. It represents the -// time and date the release was created. -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// Namespace: host -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is running - // on. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of - // level 2 memory cache available to the processor (in Bytes). 
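The `graphql.*` helpers and the `graphql.operation.type` enum values removed above combine the same way on a span. A sketch under the same assumptions (tracer name and import path are illustrative; the document value may need sanitizing per the note on `graphql.document` above):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version path; match the vendored package
)

// traceQuery tags a span with the graphql.* attributes defined above.
func traceQuery(ctx context.Context, document string) {
	_, span := otel.Tracer("example/graphql").Start(ctx, "GraphQL findBookById")
	defer span.End()

	span.SetAttributes(
		semconv.GraphQLOperationTypeQuery,            // enum value for "graphql.operation.type"
		semconv.GraphQLOperationName("findBookById"), // operation name
		semconv.GraphQLDocument(document),            // may be sanitized to exclude sensitive data
	)
}
```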
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" - // semantic conventions. It represents the family or generation of the CPU. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "PA-RISC 1.1e" - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" - // semantic conventions. It represents the model identifier. It provides more - // granular information about the CPU, distinguishing it from other CPUs within - // the same family. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "6", "9000/778/B180L" - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" - // semantic conventions. It represents the stepping or core revisions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1", "r1p1" - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "GenuineIntel" - // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX - // registers. Writing these to memory in this order results in a 12-character - // string. - // - // [CPUID]: https://wiki.osdev.org/CPUID - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be the - // instance_id assigned by the cloud provider. For non-containerized systems, - // this should be the `machine-id`. See the table below for the sources to use - // to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "fdbf79e8af94cb7f9e8df36789187052" - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the VM image ID or host OS image ID. For - // Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ami-07b06b442921831e5" - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the "host.image.name" - // semantic conventions. It represents the name of the VM image or OS install - // the host was instantiated from. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version string - // of the VM image or host OS as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC 5952] format. - // - // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, excluding - // loopback interfaces. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as - // hyphen-separated octets in uppercase hexadecimal form from most to least - // significant. - // - // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified hostname, - // or another name specified by the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-test" - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "n1-standard-1" - HostTypeKey = attribute.Key("host.type") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or generation -// of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model identifier. 
-// It provides more granular information about the CPU, distinguishing it from -// other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val string) attribute.KeyValue { - return HostCPUSteppingKey.String(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. -func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use to -// determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the "host.image.id" -// semantic conventions. It represents the VM image ID or host OS image ID. For -// Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM image -// or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string of -// the VM image or host OS as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic -// conventions. It represents the available MAC addresses of the host, excluding -// loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" semantic -// conventions. It represents the name of the host. On Unix systems, it may -// contain what the hostname command returns, or the fully qualified hostname, or -// another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" semantic -// conventions. 
It represents the type of host. For Cloud, this must be the -// machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Enum values for host.arch -var ( - // AMD64 - // Stability: development - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - // Stability: development - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - // Stability: development - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - // Stability: development - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - // Stability: development - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - // Stability: development - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - // Stability: development - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - // Stability: development - HostArchX86 = HostArchKey.String("x86") -) - -// Namespace: http -const ( - // HTTPConnectionStateKey is the attribute Key conforming to the - // "http.connection.state" semantic conventions. It represents the state of the - // HTTP connection in the HTTP connection pool. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "idle" - HTTPConnectionStateKey = attribute.Key("http.connection.state") - - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of the - // request payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the HTTP request - // method. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GET", "POST", "HEAD" - // Note: HTTP request method value SHOULD be "known" to the instrumentation. - // By default, this convention defines "known" methods as the ones listed in - // [RFC9110] - // and the PATCH method defined in [RFC5789]. - // - // If the HTTP request method is not known to instrumentation, it MUST set the - // `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of - // case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is not a - // list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods to be - // case insensitive, SHOULD populate a canonical equivalent. 
- // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - // - // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods - // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "GeT", "ACL", "foo" - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the ordinal - // number of request resending attempt (for any reason, including redirects). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending (e.g. - // redirection, authorization failure, 503 Server Unavailable, network issues, - // or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" - // semantic conventions. It represents the total size of the request in bytes. - // This should be the total number of bytes sent over the wire, including the - // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request - // body if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPRequestSizeKey = attribute.Key("http.request.size") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size of the - // response payload body in bytes. This is the number of bytes transferred - // excluding headers and is often, but not always, present as the - // [Content-Length] header. For requests using transport encoding, this should - // be the compressed size. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseSizeKey is the attribute Key conforming to the - // "http.response.size" semantic conventions. It represents the total size of - // the response in bytes. This should be the total number of bytes sent over the - // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), - // headers, and response body and trailers if any. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - HTTPResponseSizeKey = attribute.Key("http.response.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status code]. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 200 - // - // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic - // conventions. It represents the matched route, that is, the path template in - // the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/users/:userID?", "{controller}/{action}/{id?}" - // Note: MUST NOT be populated when this is not supported by the HTTP server - // framework as the route attribute should have low-cardinality and the URI path - // can NOT substitute it. - // SHOULD include the [application root] if there is one. - // - // [application root]: /docs/http/http-spans.md#http-server-definitions - HTTPRouteKey = attribute.Key("http.route") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. -// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestHeader returns an attribute KeyValue conforming to the -// "http.request.header" semantic conventions. It represents the HTTP request -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. -func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.request.header."+key, val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPRequestSize returns an attribute KeyValue conforming to the -// "http.request.size" semantic conventions. It represents the total size of the -// request in bytes. This should be the total number of bytes sent over the wire, -// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, -// and request body if any. -func HTTPRequestSize(val int) attribute.KeyValue { - return HTTPRequestSizeKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of the -// response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length] header. For requests using transport encoding, this should be -// the compressed size. 
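Taken together, the `http.*` helpers in this namespace are what instrumentation uses to describe a request/response pair on a span. A small hand-rolled server-side sketch; the handler, tracer name, and semconv import version are assumptions, and in this module the vendored otelhttp instrumentation would normally set these attributes for you:

```go
package example

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version path; match the vendored package
)

// handleUser shows the http.* attributes a server span would typically carry.
func handleUser(w http.ResponseWriter, r *http.Request) {
	_, span := otel.Tracer("example/http").Start(r.Context(), "GET /users/{id}")
	defer span.End()

	span.SetAttributes(
		semconv.HTTPRequestMethodGet,     // enum value for "http.request.method"
		semconv.HTTPRoute("/users/{id}"), // matched route template, not the raw path
		semconv.HTTPRequestBodySize(int(r.ContentLength)),
		semconv.HTTPResponseStatusCode(http.StatusOK),
	)
	w.WriteHeader(http.StatusOK)
}
```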
-// -// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseHeader returns an attribute KeyValue conforming to the -// "http.response.header" semantic conventions. It represents the HTTP response -// headers, `` being the normalized HTTP Header name (lowercase), the value -// being the header values. -func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("http.response.header."+key, val) -} - -// HTTPResponseSize returns an attribute KeyValue conforming to the -// "http.response.size" semantic conventions. It represents the total size of the -// response in bytes. This should be the total number of bytes sent over the -// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), -// headers, and response body and trailers if any. -func HTTPResponseSize(val int) attribute.KeyValue { - return HTTPResponseSizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the -// [HTTP response status code]. -// -// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Enum values for http.connection.state -var ( - // active state. - // Stability: development - HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") - // idle state. - // Stability: development - HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") -) - -// Enum values for http.request.method -var ( - // CONNECT method. - // Stability: stable - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method. - // Stability: stable - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method. - // Stability: stable - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method. - // Stability: stable - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method. - // Stability: stable - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method. - // Stability: stable - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method. - // Stability: stable - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method. - // Stability: stable - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method. - // Stability: stable - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of. - // Stability: stable - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// Namespace: hw -const ( - // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. - // It represents an identifier for the hardware component, unique within the - // monitored host. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "win32battery_battery_testsysa33_1" - HwIDKey = attribute.Key("hw.id") - - // HwNameKey is the attribute Key conforming to the "hw.name" semantic - // conventions. It represents an easily-recognizable name for the hardware - // component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "eth0" - HwNameKey = attribute.Key("hw.name") - - // HwParentKey is the attribute Key conforming to the "hw.parent" semantic - // conventions. It represents the unique identifier of the parent component - // (typically the `hw.id` attribute of the enclosure, or disk controller). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "dellStorage_perc_0" - HwParentKey = attribute.Key("hw.parent") - - // HwStateKey is the attribute Key conforming to the "hw.state" semantic - // conventions. It represents the current state of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - HwStateKey = attribute.Key("hw.state") - - // HwTypeKey is the attribute Key conforming to the "hw.type" semantic - // conventions. It represents the type of the component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: Describes the category of the hardware component for which `hw.state` - // is being reported. For example, `hw.type=temperature` along with - // `hw.state=degraded` would indicate that the temperature of the hardware - // component has been reported as `degraded`. - HwTypeKey = attribute.Key("hw.type") -) - -// HwID returns an attribute KeyValue conforming to the "hw.id" semantic -// conventions. It represents an identifier for the hardware component, unique -// within the monitored host. -func HwID(val string) attribute.KeyValue { - return HwIDKey.String(val) -} - -// HwName returns an attribute KeyValue conforming to the "hw.name" semantic -// conventions. It represents an easily-recognizable name for the hardware -// component. -func HwName(val string) attribute.KeyValue { - return HwNameKey.String(val) -} - -// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic -// conventions. It represents the unique identifier of the parent component -// (typically the `hw.id` attribute of the enclosure, or disk controller). 
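The `hw.*` attributes are usually attached to hardware health metrics rather than spans. A sketch of building the attribute set for one component; the component ID is the example quoted in the comments above, the rest is an assumption:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version path; match the vendored package
)

// batteryAttrs builds the hw.* attribute set for one monitored component.
func batteryAttrs() attribute.Set {
	return attribute.NewSet(
		semconv.HwID("win32battery_battery_testsysa33_1"), // unique within the monitored host
		semconv.HwName("battery-0"),                       // easily-recognizable name (assumed)
		semconv.HwTypeBattery,                             // enum value for "hw.type"
		semconv.HwStateOk,                                 // enum value for "hw.state"
	)
}
```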
-func HwParent(val string) attribute.KeyValue { - return HwParentKey.String(val) -} - -// Enum values for hw.state -var ( - // Ok - // Stability: development - HwStateOk = HwStateKey.String("ok") - // Degraded - // Stability: development - HwStateDegraded = HwStateKey.String("degraded") - // Failed - // Stability: development - HwStateFailed = HwStateKey.String("failed") -) - -// Enum values for hw.type -var ( - // Battery - // Stability: development - HwTypeBattery = HwTypeKey.String("battery") - // CPU - // Stability: development - HwTypeCPU = HwTypeKey.String("cpu") - // Disk controller - // Stability: development - HwTypeDiskController = HwTypeKey.String("disk_controller") - // Enclosure - // Stability: development - HwTypeEnclosure = HwTypeKey.String("enclosure") - // Fan - // Stability: development - HwTypeFan = HwTypeKey.String("fan") - // GPU - // Stability: development - HwTypeGpu = HwTypeKey.String("gpu") - // Logical disk - // Stability: development - HwTypeLogicalDisk = HwTypeKey.String("logical_disk") - // Memory - // Stability: development - HwTypeMemory = HwTypeKey.String("memory") - // Network - // Stability: development - HwTypeNetwork = HwTypeKey.String("network") - // Physical disk - // Stability: development - HwTypePhysicalDisk = HwTypeKey.String("physical_disk") - // Power supply - // Stability: development - HwTypePowerSupply = HwTypeKey.String("power_supply") - // Tape drive - // Stability: development - HwTypeTapeDrive = HwTypeKey.String("tape_drive") - // Temperature - // Stability: development - HwTypeTemperature = HwTypeKey.String("temperature") - // Voltage - // Stability: development - HwTypeVoltage = HwTypeKey.String("voltage") -) - -// Namespace: ios -const ( - // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" - // semantic conventions. It represents the this attribute represents the state - // of the application. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The iOS lifecycle states are defined in the - // [UIApplicationDelegate documentation], and from which the `OS terminology` - // column values are derived. - // - // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate - IOSAppStateKey = attribute.Key("ios.app.state") -) - -// Enum values for ios.app.state -var ( - // The app has become `active`. Associated with UIKit notification - // `applicationDidBecomeActive`. - // - // Stability: development - IOSAppStateActive = IOSAppStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification - // `applicationWillResignActive`. - // - // Stability: development - IOSAppStateInactive = IOSAppStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit - // notification `applicationDidEnterBackground`. - // - // Stability: development - IOSAppStateBackground = IOSAppStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit - // notification `applicationWillEnterForeground`. - // - // Stability: development - IOSAppStateForeground = IOSAppStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification - // `applicationWillTerminate`. - // - // Stability: development - IOSAppStateTerminate = IOSAppStateKey.String("terminate") -) - -// Namespace: k8s -const ( - // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" - // semantic conventions. 
It represents the name of the cluster. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-cluster" - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" - // semantic conventions. It represents a pseudo-ID for the cluster, set to the - // UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" - // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8s cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8s ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T X.667]. - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // > different from all other UUIDs generated before 3603 A.D., or is - // > extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - // - // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "redis" - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the number - // of times the container was restarted. This attribute can be used to identify - // a particular container (running or stopped) within a container spec. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to - // the "k8s.container.status.last_terminated_reason" semantic conventions. It - // represents the last terminated reason of the Container. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Evicted", "Error" - K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") - - // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" - // semantic conventions. It represents the name of the CronJob. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" - // semantic conventions. It represents the UID of the CronJob. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" - // semantic conventions. It represents the UID of the DaemonSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic - // conventions. It represents the name of the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SHPANameKey = attribute.Key("k8s.hpa.name") - - // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic - // conventions. It represents the UID of the horizontal pod autoscaler. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic - // conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic - // conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "default" - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNamespacePhaseKey is the attribute Key conforming to the - // "k8s.namespace.phase" semantic conventions. It represents the phase of the - // K8s namespace. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "active", "terminating" - // Note: This attribute aligns with the `phase` field of the - // [K8s NamespaceStatus] - // - // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core - K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "node-1" - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic - // conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic - // conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry-pod-autoconf" - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic - // conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SReplicationControllerNameKey is the attribute Key conforming to the - // "k8s.replicationcontroller.name" semantic conventions. It represents the name - // of the replication controller. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") - - // K8SReplicationControllerUIDKey is the attribute Key conforming to the - // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID - // of the replication controller. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") - - // K8SResourceQuotaNameKey is the attribute Key conforming to the - // "k8s.resourcequota.name" semantic conventions. It represents the name of the - // resource quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") - - // K8SResourceQuotaUIDKey is the attribute Key conforming to the - // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the - // resource quota. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "opentelemetry" - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - - // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" - // semantic conventions. It represents the name of the K8s volume. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "volume0" - K8SVolumeNameKey = attribute.Key("k8s.volume.name") - - // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" - // semantic conventions. It represents the type of the K8s volume. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "emptyDir", "persistentVolumeClaim" - K8SVolumeTypeKey = attribute.Key("k8s.volume.type") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. 
This attribute can be used to identify a -// particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue -// conforming to the "k8s.container.status.last_terminated_reason" semantic -// conventions. It represents the last terminated reason of the Container. -func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { - return K8SContainerStatusLastTerminatedReasonKey.String(val) -} - -// K8SCronJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob -// annotation placed on the CronJob, the `` being the annotation name, the -// value being the annotation value. -func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.annotation."+key, val) -} - -// K8SCronJobLabel returns an attribute KeyValue conforming to the -// "k8s.cronjob.label" semantic conventions. It represents the label placed on -// the CronJob, the `` being the label name, the value being the label -// value. -func K8SCronJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.cronjob.label."+key, val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.daemonset.annotation" semantic conventions. It represents the annotation -// key-value pairs placed on the DaemonSet. -func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.annotation."+key, val) -} - -// K8SDaemonSetLabel returns an attribute KeyValue conforming to the -// "k8s.daemonset.label" semantic conventions. It represents the label key-value -// pairs placed on the DaemonSet. -func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.daemonset.label."+key, val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. -func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the -// "k8s.deployment.annotation" semantic conventions. It represents the annotation -// key-value pairs placed on the Deployment. -func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.annotation."+key, val) -} - -// K8SDeploymentLabel returns an attribute KeyValue conforming to the -// "k8s.deployment.label" semantic conventions. 
It represents the label key-value -// pairs placed on the Deployment. -func K8SDeploymentLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.deployment.label."+key, val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" -// semantic conventions. It represents the name of the horizontal pod autoscaler. -func K8SHPAName(val string) attribute.KeyValue { - return K8SHPANameKey.String(val) -} - -// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" -// semantic conventions. It represents the UID of the horizontal pod autoscaler. -func K8SHPAUID(val string) attribute.KeyValue { - return K8SHPAUIDKey.String(val) -} - -// K8SJobAnnotation returns an attribute KeyValue conforming to the -// "k8s.job.annotation" semantic conventions. It represents the annotation -// key-value pairs placed on the Job. -func K8SJobAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.annotation."+key, val) -} - -// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" -// semantic conventions. It represents the label key-value pairs placed on the -// Job. -func K8SJobLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.job.label."+key, val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the -// "k8s.namespace.annotation" semantic conventions. It represents the annotation -// key-value pairs placed on the Namespace. -func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.annotation."+key, val) -} - -// K8SNamespaceLabel returns an attribute KeyValue conforming to the -// "k8s.namespace.label" semantic conventions. It represents the label key-value -// pairs placed on the Namespace. -func K8SNamespaceLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.namespace.label."+key, val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeAnnotation returns an attribute KeyValue conforming to the -// "k8s.node.annotation" semantic conventions. It represents the annotation -// placed on the Node, the `` being the annotation name, the value being the -// annotation value, even if the value is empty. 
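Most of the `k8s.*` attributes above are resource attributes, so downstream they typically end up on the SDK `Resource` rather than on individual spans. A sketch using the example values quoted in the comments; `resource.NewWithAttributes` and `semconv.SchemaURL` are standard otel SDK/semconv API, while the label key and value are assumptions:

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0" // assumed version path; match the vendored package
)

// podResource describes the pod this process runs in.
func podResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL shipped with the semconv package
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodLabel("app", "opentelemetry"), // key-parameterised helper
		semconv.K8SNodeName("node-1"),
	)
}
```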
-func K8SNodeAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.annotation."+key, val) -} - -// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" -// semantic conventions. It represents the label placed on the Node, the `` -// being the label name, the value being the label value, even if the value is -// empty. -func K8SNodeLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.node.label."+key, val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" -// semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodAnnotation returns an attribute KeyValue conforming to the -// "k8s.pod.annotation" semantic conventions. It represents the annotation placed -// on the Pod, the `` being the annotation name, the value being the -// annotation value. -func K8SPodAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.annotation."+key, val) -} - -// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" -// semantic conventions. It represents the label placed on the Pod, the `` -// being the label name, the value being the label value. -func K8SPodLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.pod.label."+key, val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.replicaset.annotation" semantic conventions. It represents the annotation -// key-value pairs placed on the ReplicaSet. -func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.annotation."+key, val) -} - -// K8SReplicaSetLabel returns an attribute KeyValue conforming to the -// "k8s.replicaset.label" semantic conventions. It represents the label key-value -// pairs placed on the ReplicaSet. -func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.replicaset.label."+key, val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SReplicationControllerName returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.name" semantic conventions. It represents the name -// of the replication controller. 
-func K8SReplicationControllerName(val string) attribute.KeyValue { - return K8SReplicationControllerNameKey.String(val) -} - -// K8SReplicationControllerUID returns an attribute KeyValue conforming to the -// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of -// the replication controller. -func K8SReplicationControllerUID(val string) attribute.KeyValue { - return K8SReplicationControllerUIDKey.String(val) -} - -// K8SResourceQuotaName returns an attribute KeyValue conforming to the -// "k8s.resourcequota.name" semantic conventions. It represents the name of the -// resource quota. -func K8SResourceQuotaName(val string) attribute.KeyValue { - return K8SResourceQuotaNameKey.String(val) -} - -// K8SResourceQuotaUID returns an attribute KeyValue conforming to the -// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the -// resource quota. -func K8SResourceQuotaUID(val string) attribute.KeyValue { - return K8SResourceQuotaUIDKey.String(val) -} - -// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the -// "k8s.statefulset.annotation" semantic conventions. It represents the -// annotation key-value pairs placed on the StatefulSet. -func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.annotation."+key, val) -} - -// K8SStatefulSetLabel returns an attribute KeyValue conforming to the -// "k8s.statefulset.label" semantic conventions. It represents the label -// key-value pairs placed on the StatefulSet. -func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { - return attribute.String("k8s.statefulset.label."+key, val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// K8SVolumeName returns an attribute KeyValue conforming to the -// "k8s.volume.name" semantic conventions. It represents the name of the K8s -// volume. 
-func K8SVolumeName(val string) attribute.KeyValue { - return K8SVolumeNameKey.String(val) -} - -// Enum values for k8s.namespace.phase -var ( - // Active namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") - // Terminating namespace phase as described by [K8s API] - // Stability: development - // - // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase - K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") -) - -// Enum values for k8s.volume.type -var ( - // A [persistentVolumeClaim] volume - // Stability: development - // - // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim - K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") - // A [configMap] volume - // Stability: development - // - // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap - K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") - // A [downwardAPI] volume - // Stability: development - // - // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi - K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") - // An [emptyDir] volume - // Stability: development - // - // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir - K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") - // A [secret] volume - // Stability: development - // - // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret - K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") - // A [local] volume - // Stability: development - // - // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local - K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") -) - -// Namespace: linux -const ( - // LinuxMemorySlabStateKey is the attribute Key conforming to the - // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab - // memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "reclaimable", "unreclaimable" - LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") -) - -// Enum values for linux.memory.slab.state -var ( - // reclaimable - // Stability: development - LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") - // unreclaimable - // Stability: development - LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") -) - -// Namespace: log -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "audit.log" - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the basename of - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "uuid.log" - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. 
It represents the full path to the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/log/mysql/audit.log" - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full path to - // the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/var/lib/docker/uuid.log" - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") - - // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic - // conventions. It represents the stream associated with the log. See below for - // a list of well-known values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - LogIostreamKey = attribute.Key("log.iostream") - - // LogRecordOriginalKey is the attribute Key conforming to the - // "log.record.original" semantic conventions. It represents the complete - // original Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - - // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" - // Note: This value MAY be added when processing a Log Record which was - // originally transmitted as a string or equivalent data type AND the Body field - // of the Log Record does not contain the same value. (e.g. a syslog or a log - // record read from a file.) - LogRecordOriginalKey = attribute.Key("log.record.original") - - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log Record. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. - // The id MAY be an - // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other - // identifiers (e.g. UUID) may be used as needed. - // - // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogFileName returns an attribute KeyValue conforming to the "log.file.name" -// semantic conventions. It represents the basename of the file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" -// semantic conventions. It represents the full path to the file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path to -// the file, with symlinks resolved. 
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// LogRecordOriginal returns an attribute KeyValue conforming to the -// "log.record.original" semantic conventions. It represents the complete -// original Log Record. -func LogRecordOriginal(val string) attribute.KeyValue { - return LogRecordOriginalKey.String(val) -} - -// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" -// semantic conventions. It represents a unique identifier for the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Enum values for log.iostream -var ( - // Logs from stdout stream - // Stability: development - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - // Stability: development - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// Namespace: messaging -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the batching - // operation. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client library - // supports both batch and single-message API for the same operation, - // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs - // and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client.id" semantic conventions. It represents a unique identifier - // for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "client-5", "myhost@8742@s8083jm" - MessagingClientIDKey = attribute.Key("messaging.client.id") - - // MessagingConsumerGroupNameKey is the attribute Key conforming to the - // "messaging.consumer.group.name" semantic conventions. It represents the name - // of the consumer group with which a consumer is associated. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-group", "indexer" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.consumer.group.name` is applicable and what it means in - // the context of that system. - MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the message - // destination name. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MyQueue", "MyTopic" - // Note: Destination name SHOULD uniquely identify a specific queue, topic or - // other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD uniquely - // identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationPartitionIDKey is the attribute Key conforming to the - // "messaging.destination.partition.id" semantic conventions. It represents the - // identifier of the partition messages are sent to or received from, unique - // within the `messaging.destination.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") - - // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to - // the "messaging.destination.subscription.name" semantic conventions. It - // represents the name of the destination subscription from which a message is - // consumed. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "subscription-a" - // Note: Semantic conventions for individual messaging systems SHOULD document - // whether `messaging.destination.subscription.name` is applicable and what it - // means in the context of that system. - MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the low - // cardinality representation of the messaging destination name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/customers/{customerId}" - // Note: Destination names could be constructed from templates. An example would - // be a destination name involving a user name or product id. Although the - // destination name in this case is of high cardinality, the underlying template - // is of low cardinality and can be effectively used for grouping and - // aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might not - // exist anymore after messages are processed. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") - - // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. 
It - // represents the ack deadline in seconds set for the modify ack deadline - // request. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") - - // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the - // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the - // ack id for a given message. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ack_id - MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") - - // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. - // It represents the delivery attempt for a given message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") - - // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to - // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It - // represents the ordering key for a given message. If the attribute is not - // present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: ordering_key - MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the message - // keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message.id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents a - // boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingKafkaOffsetKey is the attribute Key conforming to the - // "messaging.kafka.offset" semantic conventions. It represents the offset of a - // record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the size of - // the message body in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed body size. 
If - // both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents the - // conversation ID identifying the conversation to which the message belongs, - // represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: MyConversationId - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents the - // size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Note: This can refer to both the compressed or uncompressed size. If both - // sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used by - // the messaging system as an identifier for the message, represented as a - // string. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 452a7c7c7c7048c2f887f61572b18fc2 - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationNameKey is the attribute Key conforming to the - // "messaging.operation.name" semantic conventions. It represents the - // system-specific name of the messaging operation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ack", "nack", "send" - MessagingOperationNameKey = attribute.Key("messaging.operation.name") - - // MessagingOperationTypeKey is the attribute Key conforming to the - // "messaging.operation.type" semantic conventions. It represents a string - // identifying the type of the messaging operation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationTypeKey = attribute.Key("messaging.operation.type") - - // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to - // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It - // represents the rabbitMQ message routing key. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myKey - MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the - // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents - // the rabbitMQ message delivery tag. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") - - // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the - // "messaging.rocketmq.consumption_model" semantic conventions. It represents - // the model of message consumption. This only applies to consumer spans. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to - // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It - // represents the delay time level for delay message, which determines the - // message delay time. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming - // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. - // It represents the timestamp in milliseconds that the delay message is - // expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents the it - // is essential for FIFO message. Messages that belong to the same message group - // are always processed one by one within the same consumer group. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myMessageGroup - MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents the - // key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "keyA", "keyB" - MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketMQMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: tagA - MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents the - // type of message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketMQNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myNamespace - MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to - // the "messaging.servicebus.disposition_status" semantic conventions. It - // represents the describes the [settlement type]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock - MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") - - // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to - // the "messaging.servicebus.message.delivery_count" semantic conventions. It - // represents the number of deliveries that have been attempted for this - // message. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") - - // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to - // the "messaging.servicebus.message.enqueued_time" semantic conventions. It - // represents the UTC epoch seconds at which the message has been accepted and - // stored in the entity. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") - - // MessagingSystemKey is the attribute Key conforming to the "messaging.system" - // semantic conventions. It represents the messaging system as identified by the - // client instrumentation. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The actual messaging system may differ from the one known by the - // client. For example, when using Kafka client libraries to communicate with - // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the - // instrumentation's best knowledge. - MessagingSystemKey = attribute.Key("messaging.system") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to the -// "messaging.batch.message_count" semantic conventions. It represents the number -// of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client.id" semantic conventions. It represents a unique identifier -// for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingConsumerGroupName returns an attribute KeyValue conforming to the -// "messaging.consumer.group.name" semantic conventions. It represents the name -// of the consumer group with which a consumer is associated. -func MessagingConsumerGroupName(val string) attribute.KeyValue { - return MessagingConsumerGroupNameKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the -// "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be unnamed -// or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. 
-func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationPartitionID returns an attribute KeyValue conforming to -// the "messaging.destination.partition.id" semantic conventions. It represents -// the identifier of the partition messages are sent to or received from, unique -// within the `messaging.destination.name`. -func MessagingDestinationPartitionID(val string) attribute.KeyValue { - return MessagingDestinationPartitionIDKey.String(val) -} - -// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming -// to the "messaging.destination.subscription.name" semantic conventions. It -// represents the name of the destination subscription from which a message is -// consumed. -func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { - return MessagingDestinationSubscriptionNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to the -// "messaging.destination.template" semantic conventions. It represents the low -// cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to the -// "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming -// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It -// represents the UTC epoch seconds at which the message has been accepted and -// stored in the entity. -func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) -} - -// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It -// represents the ack deadline in seconds set for the modify ack deadline -// request. -func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) -} - -// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the -// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the -// ack id for a given message. -func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageAckIDKey.String(val) -} - -// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic -// conventions. It represents the delivery attempt for a given message. -func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { - return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) -} - -// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming -// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It -// represents the ordering key for a given message. If the attribute is not -// present, the message does not have an ordering key. 
-func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubSubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the message -// keys in Kafka are used for grouping alike messages to ensure they're processed -// on the same partition. They differ from `messaging.message.id` in that they're -// not unique. If the key is `null`, the attribute MUST NOT be set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the -// "messaging.kafka.message.tombstone" semantic conventions. It represents a -// boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingKafkaOffset returns an attribute KeyValue conforming to the -// "messaging.kafka.offset" semantic conventions. It represents the offset of a -// record in the corresponding Kafka partition. -func MessagingKafkaOffset(val int) attribute.KeyValue { - return MessagingKafkaOffsetKey.Int(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size of -// the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming to the -// "messaging.message.conversation_id" semantic conventions. It represents the -// conversation ID identifying the conversation to which the message belongs, -// represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the -// "messaging.message.envelope.size" semantic conventions. It represents the size -// of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by the -// messaging system as an identifier for the message, represented as a string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingOperationName returns an attribute KeyValue conforming to the -// "messaging.operation.name" semantic conventions. It represents the -// system-specific name of the messaging operation. -func MessagingOperationName(val string) attribute.KeyValue { - return MessagingOperationNameKey.String(val) -} - -// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitMQDestinationRoutingKeyKey.String(val) -} - -// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming -// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. 
It -// represents the rabbitMQ message delivery tag. -func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { - return MessagingRabbitMQMessageDeliveryTagKey.Int(val) -} - -// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. -func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.group" semantic conventions. It represents the it -// is essential for FIFO message. Messages that belong to the same message group -// are always processed one by one within the same consumer group. -func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { - return MessagingRocketMQMessageGroupKey.String(val) -} - -// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.keys" semantic conventions. It represents the -// key(s) of message, another way to mark message besides message id. -func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketMQMessageKeysKey.StringSlice(val) -} - -// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the -// "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketMQMessageTag(val string) attribute.KeyValue { - return MessagingRocketMQMessageTagKey.String(val) -} - -// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the -// "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketMQNamespace(val string) attribute.KeyValue { - return MessagingRocketMQNamespaceKey.String(val) -} - -// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.delivery_count" semantic -// conventions. It represents the number of deliveries that have been attempted -// for this message. -func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { - return MessagingServiceBusMessageDeliveryCountKey.Int(val) -} - -// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue -// conforming to the "messaging.servicebus.message.enqueued_time" semantic -// conventions. It represents the UTC epoch seconds at which the message has been -// accepted and stored in the entity. -func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { - return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) -} - -// Enum values for messaging.operation.type -var ( - // A message is created. 
"Create" spans always refer to a single message and are - // used to provide a unique creation context for messages in batch sending - // scenarios. - // - // Stability: development - MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") - // One or more messages are provided for sending to an intermediary. If a single - // message is sent, the context of the "Send" span can be used as the creation - // context and no "Create" span needs to be created. - // - // Stability: development - MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") - // One or more messages are requested by a consumer. This operation refers to - // pull-based scenarios, where consumers explicitly call methods of messaging - // SDKs to receive messages. - // - // Stability: development - MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") - // One or more messages are processed by a consumer. - // - // Stability: development - MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") - // One or more messages are settled. - // - // Stability: development - MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") - // Deprecated: Replaced by `process`. - MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver") - // Deprecated: Replaced by `send`. - MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") -) - -// Enum values for messaging.rocketmq.consumption_model -var ( - // Clustering consumption model - // Stability: development - MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") - // Broadcasting consumption model - // Stability: development - MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") -) - -// Enum values for messaging.rocketmq.message.type -var ( - // Normal message - // Stability: development - MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") - // FIFO message - // Stability: development - MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") - // Delay message - // Stability: development - MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") - // Transaction message - // Stability: development - MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") -) - -// Enum values for messaging.servicebus.disposition_status -var ( - // Message is completed - // Stability: development - MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") - // Message is abandoned - // Stability: development - MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") - // Message is sent to dead letter queue - // Stability: development - MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter") - // Message is deferred - // Stability: development - MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") -) - -// Enum values for messaging.system -var ( - // Apache ActiveMQ - // Stability: development - MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - // Stability: development - MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - // Stability: development - 
MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") - // Azure Event Hubs - // Stability: development - MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") - // Azure Service Bus - // Stability: development - MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") - // Google Cloud Pub/Sub - // Stability: development - MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - // Stability: development - MessagingSystemJMS = MessagingSystemKey.String("jms") - // Apache Kafka - // Stability: development - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - // Stability: development - MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - // Stability: development - MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") - // Apache Pulsar - // Stability: development - MessagingSystemPulsar = MessagingSystemKey.String("pulsar") -) - -// Namespace: network -const ( - // NetworkCarrierICCKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier network. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: DE - NetworkCarrierICCKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMCCKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile carrier - // country code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 310 - NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMNCKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile carrier - // network code. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 001 - NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of the - // mobile carrier. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: sprint - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionStateKey is the attribute Key conforming to the - // "network.connection.state" semantic conventions. It represents the state of - // network connection. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "close_wait" - // Note: Connection states are defined as part of the [rfc9293] - // - // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 - NetworkConnectionStateKey = attribute.Key("network.connection.state") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the this - // describes more details regarding the connection.type. It may be the type of - // cell technology connection, but it could be used for describing details about - // a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: LTE - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the internet - // connection type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: wifi - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkInterfaceNameKey is the attribute Key conforming to the - // "network.interface.name" semantic conventions. It represents the network - // interface name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "lo", "eth0" - NetworkInterfaceNameKey = attribute.Key("network.interface.name") - - // NetworkIODirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "transmit" - NetworkIODirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer address - // of the network connection - IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "10.1.2.80", "/tmp/my.sock" - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" - // semantic conventions. It represents the peer port number of the network - // connection. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the - // [OSI application layer] or non-OSI equivalent. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "amqp", "http", "mqtt" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI application layer]: https://wikipedia.org/wiki/Application_layer - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the actual - // version of the protocol used for network communication. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.1", "2" - // Note: If protocol version is subject to negotiation (for example using [ALPN] - // ), this attribute SHOULD be set to the negotiated version. If the actual - // protocol version is not known, this attribute SHOULD NOT be set. - // - // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html - NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the - // [OSI transport layer] or [inter-process communication method]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "tcp", "udp" - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port 12345. - // - // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer - // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic - // conventions. It represents the [OSI network layer] or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "ipv4", "ipv6" - // Note: The value SHOULD be normalized to lowercase. - // - // [OSI network layer]: https://wikipedia.org/wiki/Network_layer - NetworkTypeKey = attribute.Key("network.type") -) - -// NetworkCarrierICC returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierICC(val string) attribute.KeyValue { - return NetworkCarrierICCKey.String(val) -} - -// NetworkCarrierMCC returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMCC(val string) attribute.KeyValue { - return NetworkCarrierMCCKey.String(val) -} - -// NetworkCarrierMNC returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMNC(val string) attribute.KeyValue { - return NetworkCarrierMNCKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkInterfaceName returns an attribute KeyValue conforming to the -// "network.interface.name" semantic conventions. It represents the network -// interface name. -func NetworkInterfaceName(val string) attribute.KeyValue { - return NetworkInterfaceNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local address -// of the network connection - IP address or Unix domain socket name. 
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port number -// of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address of -// the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the -// [OSI application layer] or non-OSI equivalent. -// -// [OSI application layer]: https://wikipedia.org/wiki/Application_layer -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the actual -// version of the protocol used for network communication. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Enum values for network.connection.state -var ( - // closed - // Stability: development - NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") - // close_wait - // Stability: development - NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") - // closing - // Stability: development - NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") - // established - // Stability: development - NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") - // fin_wait_1 - // Stability: development - NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") - // fin_wait_2 - // Stability: development - NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") - // last_ack - // Stability: development - NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") - // listen - // Stability: development - NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") - // syn_received - // Stability: development - NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received") - // syn_sent - // Stability: development - NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") - // time_wait - // Stability: development - NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") -) - -// Enum values for network.connection.subtype -var ( - // GPRS - // Stability: development - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - // Stability: development - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - // Stability: development - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - // Stability: development - 
NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - // Stability: development - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - // Stability: development - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - // Stability: development - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - // Stability: development - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - // Stability: development - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - // Stability: development - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - // Stability: development - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. B - // Stability: development - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - // Stability: development - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - // Stability: development - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - // Stability: development - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - // Stability: development - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - // Stability: development - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - // Stability: development - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - // Stability: development - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - // Stability: development - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - // Stability: development - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -// Enum values for network.connection.type -var ( - // wifi - // Stability: development - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - // Stability: development - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - // Stability: development - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - // Stability: development - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - // Stability: development - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -// Enum values for network.io.direction -var ( - // transmit - // Stability: development - NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") - // receive - // Stability: development - NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") -) - -// Enum values for network.transport -var ( - // TCP - // Stability: stable - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - // Stability: stable - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe. 
- // Stability: stable - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - // Stability: stable - NetworkTransportUnix = NetworkTransportKey.String("unix") - // QUIC - // Stability: stable - NetworkTransportQUIC = NetworkTransportKey.String("quic") -) - -// Enum values for network.type -var ( - // IPv4 - // Stability: stable - NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") - // IPv6 - // Stability: stable - NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") -) - -// Namespace: oci -const ( - // OCIManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. It represents the digest of the - // OCI image manifest. For container images specifically is the digest by which - // the container image is known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" - // Note: Follows [OCI Image Manifest Specification], and specifically the - // [Digest property]. - // An example can be found in [Example Image Manifest]. - // - // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md - // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests - // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest - OCIManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OCIManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OCIManifestDigest(val string) attribute.KeyValue { - return OCIManifestDigestKey.String(val) -} - -// Namespace: opentracing -const ( - // OpenTracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the parent-child - // Reference type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: The causal relationship between a child Span and a parent Span. - OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -// Enum values for opentracing.ref_type -var ( - // The parent Span depends on the child Span in some capacity - // Stability: development - OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - // Stability: development - OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") -) - -// Namespace: os -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic - // conventions. It represents the unique identifier for a particular build or - // compilation of the operating system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TQ3C.230805.001.B2", "20E247", "22621" - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to be - // parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" semantic - // conventions. It represents the version string of the operating system as - // defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - OSVersionKey = attribute.Key("os.version") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the "os.description" -// semantic conventions. It represents the human readable (not intended to be -// parsed) OS version information, like e.g. reported by `ver` or -// `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating system -// as defined in [Version Attributes]. 
-// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// Enum values for os.type -var ( - // Microsoft Windows - // Stability: development - OSTypeWindows = OSTypeKey.String("windows") - // Linux - // Stability: development - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - // Stability: development - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - // Stability: development - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - // Stability: development - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - // Stability: development - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - // Stability: development - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - // Stability: development - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - // Stability: development - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - // Stability: development - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - // Stability: development - OSTypeZOS = OSTypeKey.String("z_os") -) - -// Namespace: otel -const ( - // OTelComponentNameKey is the attribute Key conforming to the - // "otel.component.name" semantic conventions. It represents a name uniquely - // identifying the instance of the OpenTelemetry component within its containing - // SDK instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otlp_grpc_span_exporter/0", "custom-name" - // Note: Implementations SHOULD ensure a low cardinality for this attribute, - // even across application or SDK restarts. - // E.g. implementations MUST NOT use UUIDs as values for this attribute. - // - // Implementations MAY achieve these goals by following a - // `/` pattern, e.g. - // `batching_span_processor/0`. - // Hereby `otel.component.type` refers to the corresponding attribute value of - // the component. - // - // The value of `instance-counter` MAY be automatically assigned by the - // component and uniqueness within the enclosing SDK instance MUST be - // guaranteed. - // For example, `` MAY be implemented by using a monotonically - // increasing counter (starting with `0`), which is incremented every time an - // instance of the given component type is started. - // - // With this implementation, for example the first Batching Span Processor would - // have `batching_span_processor/0` - // as `otel.component.name`, the second one `batching_span_processor/1` and so - // on. - // These values will therefore be reused in the case of an application restart. - OTelComponentNameKey = attribute.Key("otel.component.name") - - // OTelComponentTypeKey is the attribute Key conforming to the - // "otel.component.type" semantic conventions. It represents a name identifying - // the type of the OpenTelemetry component. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "batching_span_processor", "com.example.MySpanExporter" - // Note: If none of the standardized values apply, implementations SHOULD use - // the language-defined name of the type. - // E.g. for Java the fully qualified classname SHOULD be used in this case. - OTelComponentTypeKey = attribute.Key("otel.component.type") - - // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" - // semantic conventions. 
It represents the name of the instrumentation scope - ( - // `InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "io.opentelemetry.contrib.mongodb" - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of the - // instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.0.0" - OTelScopeVersionKey = attribute.Key("otel.scope.version") - - // OTelSpanSamplingResultKey is the attribute Key conforming to the - // "otel.span.sampling_result" semantic conventions. It represents the result - // value of the sampler for this span. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") - - // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" - // semantic conventions. It represents the name of the code, either "OK" or - // "ERROR". MUST NOT be set if the status code is UNSET. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the description - // of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "resource not found" - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -// OTelComponentName returns an attribute KeyValue conforming to the -// "otel.component.name" semantic conventions. It represents a name uniquely -// identifying the instance of the OpenTelemetry component within its containing -// SDK instance. -func OTelComponentName(val string) attribute.KeyValue { - return OTelComponentNameKey.String(val) -} - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the description -// of the Status if it has a value, otherwise not set. 
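
// Illustrative sketch (editorial aside, not part of this diff): one way an SDK
// component could derive otel.component.name values of the
// "component.type/instance-counter" form described in the note above, pairing the
// name with the matching otel.component.type value. Names and the semconv import
// version are assumptions.
package example

import (
	"fmt"
	"sync/atomic"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // version illustrative
)

var batchingSpanProcessorCount atomic.Int64

// nextBatchingProcessorAttrs yields batching_span_processor/0, /1, ... per process,
// so values stay low-cardinality across restarts as the note requires.
func nextBatchingProcessorAttrs() []attribute.KeyValue {
	n := batchingSpanProcessorCount.Add(1) - 1 // first instance gets 0
	return []attribute.KeyValue{
		semconv.OTelComponentTypeBatchingSpanProcessor,
		semconv.OTelComponentName(fmt.Sprintf("batching_span_processor/%d", n)),
	}
}
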
-func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// Enum values for otel.component.type -var ( - // The builtin SDK batching span processor - // - // Stability: development - OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") - // The builtin SDK simple span processor - // - // Stability: development - OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") - // The builtin SDK batching log record processor - // - // Stability: development - OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") - // The builtin SDK simple log record processor - // - // Stability: development - OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") - // OTLP span exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") - // OTLP span exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") - // OTLP span exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") - // OTLP log record exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") - // OTLP log record exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") - // OTLP log record exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") - // The builtin SDK periodically exporting metric reader - // - // Stability: development - OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") - // OTLP metric exporter over gRPC with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") - // OTLP metric exporter over HTTP with protobuf serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") - // OTLP metric exporter over HTTP with JSON serialization - // - // Stability: development - OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") -) - -// Enum values for otel.span.sampling_result -var ( - // The span is not sampled and not recording - // Stability: development - OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") - // The span is not sampled, but recording - // Stability: development - OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") - // The span is sampled and recording - // Stability: development - OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") -) - -// Enum values for otel.status_code -var ( - // The operation has been validated by an Application developer or Operator to - // have completed successfully. 
- // Stability: stable - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error. - // Stability: stable - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// Namespace: peer -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic - // conventions. It represents the [`service.name`] of the remote service. SHOULD - // be equal to the actual `service.name` resource attribute of the remote - // service if any. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: AuthTokenCache - // - // [`service.name`]: /docs/resource/README.md#service - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the "peer.service" -// semantic conventions. It represents the [`service.name`] of the remote -// service. SHOULD be equal to the actual `service.name` resource attribute of -// the remote service if any. -// -// [`service.name`]: /docs/resource/README.md#service -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// Namespace: process -const ( - // ProcessArgsCountKey is the attribute Key conforming to the - // "process.args_count" semantic conventions. It represents the length of the - // process.command_args array. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 4 - // Note: This field can be useful for querying or performing bucket analysis on - // how many arguments were provided to start a process. More arguments may be an - // indication of suspicious activity. - ProcessArgsCountKey = attribute.Key("process.args_count") - - // ProcessCommandKey is the attribute Key conforming to the "process.command" - // semantic conventions. It represents the command used to launch the process - // (i.e. the command name). On Linux based systems, can be set to the zeroth - // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter - // extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otelcol" - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received by - // the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this - // would be the full argv vector passed to `main`. SHOULD NOT be collected by - // default unless there is sanitization that excludes sensitive data. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cmd/otecol", "--config=config.yaml" - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. It represents the full command - // used to launch the process as a single string representing the full command. - // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if - // you have to assemble it just for monitoring; use `process.command_args` - // instead. 
SHOULD NOT be collected by default unless there is sanitization that - // excludes sensitive data. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessContextSwitchTypeKey is the attribute Key conforming to the - // "process.context_switch_type" semantic conventions. It represents the - // specifies whether the context switches for this data point were voluntary or - // involuntary. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") - - // ProcessCreationTimeKey is the attribute Key conforming to the - // "process.creation.time" semantic conventions. It represents the date and time - // the process was created, in ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:25:34.853Z" - ProcessCreationTimeKey = attribute.Key("process.creation.time") - - // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the - // "process.executable.build_id.gnu" semantic conventions. It represents the GNU - // build ID as found in the `.note.gnu.build-id` ELF section (hex string). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" - ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") - - // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the - // "process.executable.build_id.go" semantic conventions. It represents the Go - // build ID as retrieved by `go tool buildid `. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" - ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") - - // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the - // "process.executable.build_id.htlhash" semantic conventions. It represents the - // profiling specific build ID for executables. See the OTel specification for - // Profiles for more information. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "600DCAFE4A110000F2BF38C493F5FB92" - ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name of the - // process executable. On Linux based systems, this SHOULD be set to the base - // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to - // the base name of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "otelcol" - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full path - // to the process executable. On Linux based systems, can be set to the target - // of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/usr/bin/cmd/otelcol" - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" - // semantic conventions. It represents the exit code of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 127 - ProcessExitCodeKey = attribute.Key("process.exit.code") - - // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" - // semantic conventions. It represents the date and time the process exited, in - // ISO 8601 format. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2023-11-21T09:26:12.315Z" - ProcessExitTimeKey = attribute.Key("process.exit.time") - - // ProcessGroupLeaderPIDKey is the attribute Key conforming to the - // "process.group_leader.pid" semantic conventions. It represents the PID of the - // process's group leader. This is also the process group ID (PGID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 23 - ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") - - // ProcessInteractiveKey is the attribute Key conforming to the - // "process.interactive" semantic conventions. It represents the whether the - // process is connected to an interactive shell. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessInteractiveKey = attribute.Key("process.interactive") - - // ProcessLinuxCgroupKey is the attribute Key conforming to the - // "process.linux.cgroup" semantic conventions. It represents the control group - // associated with the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", - // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" - // Note: Control groups (cgroups) are a kernel feature used to organize and - // manage process resources. This attribute provides the path(s) to the - // cgroup(s) associated with the process, which should match the contents of the - // [/proc/[PID]/cgroup] file. - // - // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html - ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessPagingFaultTypeKey is the attribute Key conforming to the - // "process.paging.fault_type" semantic conventions. It represents the type of - // page fault for this data point. Type `major` is for major/hard page faults, - // and `minor` is for minor/soft page faults. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent Process - // identifier (PPID). 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic - // conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRealUserIDKey is the attribute Key conforming to the - // "process.real_user.id" semantic conventions. It represents the real user ID - // (RUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1000 - ProcessRealUserIDKey = attribute.Key("process.real_user.id") - - // ProcessRealUserNameKey is the attribute Key conforming to the - // "process.real_user.name" semantic conventions. It represents the username of - // the real user of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessRealUserNameKey = attribute.Key("process.real_user.name") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of the - // runtime of this process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "OpenJDK Runtime Environment" - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the version of - // the runtime of this process, as returned by the runtime without modification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14.0.2 - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - - // ProcessSavedUserIDKey is the attribute Key conforming to the - // "process.saved_user.id" semantic conventions. It represents the saved user ID - // (SUID) of the process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1002 - ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") - - // ProcessSavedUserNameKey is the attribute Key conforming to the - // "process.saved_user.name" semantic conventions. It represents the username of - // the saved user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "operator" - ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") - - // ProcessSessionLeaderPIDKey is the attribute Key conforming to the - // "process.session_leader.pid" semantic conventions. It represents the PID of - // the process's session leader. This is also the session ID (SID) of the - // process. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 14 - ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") - - // ProcessTitleKey is the attribute Key conforming to the "process.title" - // semantic conventions. It represents the process title (proctitle). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cat /etc/hostname", "xfce4-session", "bash" - // Note: In many Unix-like systems, process title (proctitle), is the string - // that represents the name or command line of a running process, displayed by - // system monitoring tools like ps, top, and htop. - ProcessTitleKey = attribute.Key("process.title") - - // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" - // semantic conventions. It represents the effective user ID (EUID) of the - // process. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1001 - ProcessUserIDKey = attribute.Key("process.user.id") - - // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" - // semantic conventions. It represents the username of the effective user of the - // process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "root" - ProcessUserNameKey = attribute.Key("process.user.name") - - // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic - // conventions. It represents the virtual process identifier. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 12 - // Note: The process ID within a PID namespace. This is not necessarily unique - // across all processes on the host but it is unique within the process - // namespace that the process exists within. - ProcessVpidKey = attribute.Key("process.vpid") - - // ProcessWorkingDirectoryKey is the attribute Key conforming to the - // "process.working_directory" semantic conventions. It represents the working - // directory of the process. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/root" - ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") -) - -// ProcessArgsCount returns an attribute KeyValue conforming to the -// "process.args_count" semantic conventions. It represents the length of the -// process.command_args array. -func ProcessArgsCount(val int) attribute.KeyValue { - return ProcessArgsCountKey.Int(val) -} - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be set -// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the -// first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the command -// arguments (including the command/executable itself) as received by the -// process. On Linux-based systems (and some other Unixoid systems supporting -// procfs), can be set according to the list of null-delimited strings extracted -// from `proc/[pid]/cmdline`. 
For libc-based executables, this would be the full -// argv vector passed to `main`. SHOULD NOT be collected by default unless there -// is sanitization that excludes sensitive data. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if -// you have to assemble it just for monitoring; use `process.command_args` -// instead. SHOULD NOT be collected by default unless there is sanitization that -// excludes sensitive data. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessCreationTime returns an attribute KeyValue conforming to the -// "process.creation.time" semantic conventions. It represents the date and time -// the process was created, in ISO 8601 format. -func ProcessCreationTime(val string) attribute.KeyValue { - return ProcessCreationTimeKey.String(val) -} - -// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the -// "process.environment_variable" semantic conventions. It represents the process -// environment variables, being the environment variable name, the value -// being the environment variable value. -func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { - return attribute.String("process.environment_variable."+key, val) -} - -// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the -// "process.executable.build_id.gnu" semantic conventions. It represents the GNU -// build ID as found in the `.note.gnu.build-id` ELF section (hex string). -func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGNUKey.String(val) -} - -// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the -// "process.executable.build_id.go" semantic conventions. It represents the Go -// build ID as retrieved by `go tool buildid `. -func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { - return ProcessExecutableBuildIDGoKey.String(val) -} - -// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to -// the "process.executable.build_id.htlhash" semantic conventions. It represents -// the profiling specific build ID for executables. See the OTel specification -// for Profiles for more information. -func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { - return ProcessExecutableBuildIDHtlhashKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of the -// process executable. On Linux based systems, this SHOULD be set to the base -// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the -// base name of `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path to -// the process executable. On Linux based systems, can be set to the target of -// `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. 
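
// Illustrative sketch (editorial aside, not part of this diff): the comment above
// says process.command_args SHOULD NOT be collected unless sensitive data is
// excluded. A minimal, hypothetical redaction step before calling the helper could
// look like this; the redaction rule and semconv import version are assumptions.
package example

import (
	"os"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // version illustrative
)

func sanitizedCommandArgs() []attribute.KeyValue {
	args := make([]string, 0, len(os.Args))
	for _, a := range os.Args {
		// Hypothetical rule: hide values of flags that commonly carry secrets.
		if i := strings.Index(a, "="); i > 0 &&
			(strings.HasPrefix(a, "--token") || strings.HasPrefix(a, "--password")) {
			a = a[:i+1] + "REDACTED"
		}
		args = append(args, a)
	}
	return []attribute.KeyValue{
		semconv.ProcessCommandArgs(args...),
		semconv.ProcessArgsCount(len(args)),
	}
}
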
-func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessExitCode returns an attribute KeyValue conforming to the -// "process.exit.code" semantic conventions. It represents the exit code of the -// process. -func ProcessExitCode(val int) attribute.KeyValue { - return ProcessExitCodeKey.Int(val) -} - -// ProcessExitTime returns an attribute KeyValue conforming to the -// "process.exit.time" semantic conventions. It represents the date and time the -// process exited, in ISO 8601 format. -func ProcessExitTime(val string) attribute.KeyValue { - return ProcessExitTimeKey.String(val) -} - -// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the -// "process.group_leader.pid" semantic conventions. It represents the PID of the -// process's group leader. This is also the process group ID (PGID) of the -// process. -func ProcessGroupLeaderPID(val int) attribute.KeyValue { - return ProcessGroupLeaderPIDKey.Int(val) -} - -// ProcessInteractive returns an attribute KeyValue conforming to the -// "process.interactive" semantic conventions. It represents the whether the -// process is connected to an interactive shell. -func ProcessInteractive(val bool) attribute.KeyValue { - return ProcessInteractiveKey.Bool(val) -} - -// ProcessLinuxCgroup returns an attribute KeyValue conforming to the -// "process.linux.cgroup" semantic conventions. It represents the control group -// associated with the process. -func ProcessLinuxCgroup(val string) attribute.KeyValue { - return ProcessLinuxCgroupKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" -// semantic conventions. It represents the username of the user that owns the -// process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRealUserID returns an attribute KeyValue conforming to the -// "process.real_user.id" semantic conventions. It represents the real user ID -// (RUID) of the process. -func ProcessRealUserID(val int) attribute.KeyValue { - return ProcessRealUserIDKey.Int(val) -} - -// ProcessRealUserName returns an attribute KeyValue conforming to the -// "process.real_user.name" semantic conventions. It represents the username of -// the real user of the process. -func ProcessRealUserName(val string) attribute.KeyValue { - return ProcessRealUserNameKey.String(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. 
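
// Illustrative sketch (editorial aside, not part of this diff): populating the
// process.runtime.* attributes above for a Go process as resource attributes.
// The semconv import version is assumed, and using "go" as the runtime name is a
// common choice rather than something stated in this file.
package example

import (
	"runtime"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // version illustrative
)

func runtimeResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
		semconv.ProcessRuntimeDescription(runtime.Compiler+" "+runtime.Version()),
	)
}
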
-func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// ProcessSavedUserID returns an attribute KeyValue conforming to the -// "process.saved_user.id" semantic conventions. It represents the saved user ID -// (SUID) of the process. -func ProcessSavedUserID(val int) attribute.KeyValue { - return ProcessSavedUserIDKey.Int(val) -} - -// ProcessSavedUserName returns an attribute KeyValue conforming to the -// "process.saved_user.name" semantic conventions. It represents the username of -// the saved user. -func ProcessSavedUserName(val string) attribute.KeyValue { - return ProcessSavedUserNameKey.String(val) -} - -// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the -// "process.session_leader.pid" semantic conventions. It represents the PID of -// the process's session leader. This is also the session ID (SID) of the -// process. -func ProcessSessionLeaderPID(val int) attribute.KeyValue { - return ProcessSessionLeaderPIDKey.Int(val) -} - -// ProcessTitle returns an attribute KeyValue conforming to the "process.title" -// semantic conventions. It represents the process title (proctitle). -func ProcessTitle(val string) attribute.KeyValue { - return ProcessTitleKey.String(val) -} - -// ProcessUserID returns an attribute KeyValue conforming to the -// "process.user.id" semantic conventions. It represents the effective user ID -// (EUID) of the process. -func ProcessUserID(val int) attribute.KeyValue { - return ProcessUserIDKey.Int(val) -} - -// ProcessUserName returns an attribute KeyValue conforming to the -// "process.user.name" semantic conventions. It represents the username of the -// effective user of the process. -func ProcessUserName(val string) attribute.KeyValue { - return ProcessUserNameKey.String(val) -} - -// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" -// semantic conventions. It represents the virtual process identifier. -func ProcessVpid(val int) attribute.KeyValue { - return ProcessVpidKey.Int(val) -} - -// ProcessWorkingDirectory returns an attribute KeyValue conforming to the -// "process.working_directory" semantic conventions. It represents the working -// directory of the process. -func ProcessWorkingDirectory(val string) attribute.KeyValue { - return ProcessWorkingDirectoryKey.String(val) -} - -// Enum values for process.context_switch_type -var ( - // voluntary - // Stability: development - ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") - // involuntary - // Stability: development - ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") -) - -// Enum values for process.paging.fault_type -var ( - // major - // Stability: development - ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") - // minor - // Stability: development - ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") -) - -// Namespace: profile -const ( - // ProfileFrameTypeKey is the attribute Key conforming to the - // "profile.frame.type" semantic conventions. It represents the describes the - // interpreter or compiler of a single frame. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "cpython" - ProfileFrameTypeKey = attribute.Key("profile.frame.type") -) - -// Enum values for profile.frame.type -var ( - // [.NET] - // - // Stability: development - // - // [.NET]: https://wikipedia.org/wiki/.NET - ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") - // [JVM] - // - // Stability: development - // - // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine - ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") - // [Kernel] - // - // Stability: development - // - // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) - ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") - // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a - // more precise value MUST be used. - // - // Stability: development - // - // [C]: https://wikipedia.org/wiki/C_(programming_language) - // [C++]: https://wikipedia.org/wiki/C%2B%2B - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") - // [Perl] - // - // Stability: development - // - // [Perl]: https://wikipedia.org/wiki/Perl - ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") - // [PHP] - // - // Stability: development - // - // [PHP]: https://wikipedia.org/wiki/PHP - ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") - // [Python] - // - // Stability: development - // - // [Python]: https://wikipedia.org/wiki/Python_(programming_language) - ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") - // [Ruby] - // - // Stability: development - // - // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) - ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") - // [V8JS] - // - // Stability: development - // - // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) - ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") - // [Erlang] - // - // Stability: development - // - // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) - ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") - // [Go], - // - // Stability: development - // - // [Go]: https://wikipedia.org/wiki/Go_(programming_language) - ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") - // [Rust] - // - // Stability: development - // - // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) - ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") -) - -// Namespace: rpc -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes] of the Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [error codes]: https://connectrpc.com//docs/protocol/#error-codes - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the - // [numeric status code] of the gRPC request. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` - // property of response if it is an error response. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: -32700, 100 - RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Parse error", "User already exists" - RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJSONRPCRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. Since protocol allows id to be int, string, - // `null` or missing (for notifications), value is expected to be cast to string - // for simplicity. Use empty string in case of `null` value. Omit entirely if - // this is a notification. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "10", "request-7", "" - RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJSONRPCVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2.0", "1.0" - RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMessageCompressedSizeKey is the attribute Key conforming to the - // "rpc.message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") - - // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" - // semantic conventions. It MUST be calculated as two different counters - // starting from `1` one for sent messages and one for received message.. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This way we guarantee that the values will be consistent between - // different implementations. - RPCMessageIDKey = attribute.Key("rpc.message.id") - - // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" - // semantic conventions. It represents the whether this is a received or sent - // message. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageTypeKey = attribute.Key("rpc.message.type") - - // RPCMessageUncompressedSizeKey is the attribute Key conforming to the - // "rpc.message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic - // conventions. It represents the name of the (logical) method being called, - // must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: exampleMethod - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function.name` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic - // conventions. It represents the full (logical) name of the service being - // called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: myservice.EchoService - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic - // conventions. It represents a string identifying the remoting system. See - // below for a list of well-known identifiers. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - RPCSystemKey = attribute.Key("rpc.system") -) - -// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the -// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the -// connect request metadata, `` being the normalized Connect Metadata key -// (lowercase), the value being the metadata values. -func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) -} - -// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the -// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the -// connect response metadata, `` being the normalized Connect Metadata key -// (lowercase), the value being the metadata values. -func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) -} - -// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the -// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC -// request metadata, `` being the normalized gRPC Metadata key (lowercase), -// the value being the metadata values. -func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) -} - -// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the -// "rpc.grpc.response.metadata" semantic conventions. 
It represents the gRPC -// response metadata, `` being the normalized gRPC Metadata key (lowercase), -// the value being the metadata values. -func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { - return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) -} - -// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` -// property of response if it is an error response. -func RPCJSONRPCErrorCode(val int) attribute.KeyValue { - return RPCJSONRPCErrorCodeKey.Int(val) -} - -// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { - return RPCJSONRPCErrorMessageKey.String(val) -} - -// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property -// of request or response. Since protocol allows id to be int, string, `null` or -// missing (for notifications), value is expected to be cast to string for -// simplicity. Use empty string in case of `null` value. Omit entirely if this is -// a notification. -func RPCJSONRPCRequestID(val string) attribute.KeyValue { - return RPCJSONRPCRequestIDKey.String(val) -} - -// RPCJSONRPCVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version -// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't -// specify this, the value can be omitted. -func RPCJSONRPCVersion(val string) attribute.KeyValue { - return RPCJSONRPCVersionKey.String(val) -} - -// RPCMessageCompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.compressed_size" semantic conventions. It represents the -// compressed size of the message in bytes. -func RPCMessageCompressedSize(val int) attribute.KeyValue { - return RPCMessageCompressedSizeKey.Int(val) -} - -// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" -// semantic conventions. It MUST be calculated as two different counters starting -// from `1` one for sent messages and one for received message.. -func RPCMessageID(val int) attribute.KeyValue { - return RPCMessageIDKey.Int(val) -} - -// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the -// "rpc.message.uncompressed_size" semantic conventions. It represents the -// uncompressed size of the message in bytes. -func RPCMessageUncompressedSize(val int) attribute.KeyValue { - return RPCMessageUncompressedSizeKey.Int(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. 
-func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// Enum values for rpc.connect_rpc.error_code -var ( - // cancelled - // Stability: development - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - // Stability: development - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - // Stability: development - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - // Stability: development - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - // Stability: development - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - // Stability: development - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - // Stability: development - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - // Stability: development - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - // Stability: development - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - // Stability: development - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - // Stability: development - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - // Stability: development - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - // Stability: development - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - // Stability: development - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - // Stability: development - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - // Stability: development - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -// Enum values for rpc.grpc.status_code -var ( - // OK - // Stability: development - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - // Stability: development - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - // Stability: development - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - // Stability: development - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - // Stability: development - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - // Stability: development - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - // Stability: development - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - // Stability: development - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - // Stability: development - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - // Stability: development - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - // Stability: development - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // 
OUT_OF_RANGE - // Stability: development - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - // Stability: development - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - // Stability: development - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - // Stability: development - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - // Stability: development - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - // Stability: development - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Enum values for rpc.message.type -var ( - // sent - // Stability: development - RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") - // received - // Stability: development - RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") -) - -// Enum values for rpc.system -var ( - // gRPC - // Stability: development - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - // Stability: development - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - // Stability: development - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - // Stability: development - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - // Stability: development - RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") -) - -// Namespace: security_rule -const ( - // SecurityRuleCategoryKey is the attribute Key conforming to the - // "security_rule.category" semantic conventions. It represents a categorization - // value keyword used by the entity using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Attempted Information Leak" - SecurityRuleCategoryKey = attribute.Key("security_rule.category") - - // SecurityRuleDescriptionKey is the attribute Key conforming to the - // "security_rule.description" semantic conventions. It represents the - // description of the rule generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Block requests to public DNS over HTTPS / TLS protocols" - SecurityRuleDescriptionKey = attribute.Key("security_rule.description") - - // SecurityRuleLicenseKey is the attribute Key conforming to the - // "security_rule.license" semantic conventions. It represents the name of the - // license under which the rule used to generate this event is made available. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Apache 2.0" - SecurityRuleLicenseKey = attribute.Key("security_rule.license") - - // SecurityRuleNameKey is the attribute Key conforming to the - // "security_rule.name" semantic conventions. It represents the name of the rule - // or signature generating the event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "BLOCK_DNS_over_TLS" - SecurityRuleNameKey = attribute.Key("security_rule.name") - - // SecurityRuleReferenceKey is the attribute Key conforming to the - // "security_rule.reference" semantic conventions. It represents the reference - // URL to additional information about the rule used to generate this event. 
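Reviewer note: drawing on the rpc.* attributes and the rpc.grpc.status_code enum values listed above, a client-side interceptor could translate a gRPC result into span attributes roughly as below. This is a sketch only; the interceptor wiring, service/method strings, and the semconv import alias are assumptions, not part of this diff:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"google.golang.org/grpc"
	"google.golang.org/grpc/status"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// unaryClientInterceptor sketches how the rpc.* semantic conventions could be
// applied to a client span wrapping a unary gRPC call.
func unaryClientInterceptor(
	ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption,
) error {
	ctx, span := otel.Tracer("example").Start(ctx, method)
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemGRPC,
		semconv.RPCService("myservice.EchoService"), // normally parsed from method
		semconv.RPCMethod("Echo"),
	)

	err := invoker(ctx, method, req, reply, cc, opts...)

	// status.Code maps a nil error to codes.OK; its numeric value matches the
	// rpc.grpc.status_code enum above.
	span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(status.Code(err))))
	return err
}
```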
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" - // Note: The URL can point to the vendor’s documentation about the rule. If - // that’s not available, it can also be a link to a more general page - // describing this type of alert. - SecurityRuleReferenceKey = attribute.Key("security_rule.reference") - - // SecurityRuleRulesetNameKey is the attribute Key conforming to the - // "security_rule.ruleset.name" semantic conventions. It represents the name of - // the ruleset, policy, group, or parent category in which the rule used to - // generate this event is a member. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Standard_Protocol_Filters" - SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") - - // SecurityRuleUUIDKey is the attribute Key conforming to the - // "security_rule.uuid" semantic conventions. It represents a rule ID that is - // unique within the scope of a set or group of agents, observers, or other - // entities using the rule for detection of this event. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" - SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") - - // SecurityRuleVersionKey is the attribute Key conforming to the - // "security_rule.version" semantic conventions. It represents the version / - // revision of the rule being used for analysis. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.0.0" - SecurityRuleVersionKey = attribute.Key("security_rule.version") -) - -// SecurityRuleCategory returns an attribute KeyValue conforming to the -// "security_rule.category" semantic conventions. It represents a categorization -// value keyword used by the entity using the rule for detection of this event. -func SecurityRuleCategory(val string) attribute.KeyValue { - return SecurityRuleCategoryKey.String(val) -} - -// SecurityRuleDescription returns an attribute KeyValue conforming to the -// "security_rule.description" semantic conventions. It represents the -// description of the rule generating the event. -func SecurityRuleDescription(val string) attribute.KeyValue { - return SecurityRuleDescriptionKey.String(val) -} - -// SecurityRuleLicense returns an attribute KeyValue conforming to the -// "security_rule.license" semantic conventions. It represents the name of the -// license under which the rule used to generate this event is made available. -func SecurityRuleLicense(val string) attribute.KeyValue { - return SecurityRuleLicenseKey.String(val) -} - -// SecurityRuleName returns an attribute KeyValue conforming to the -// "security_rule.name" semantic conventions. It represents the name of the rule -// or signature generating the event. -func SecurityRuleName(val string) attribute.KeyValue { - return SecurityRuleNameKey.String(val) -} - -// SecurityRuleReference returns an attribute KeyValue conforming to the -// "security_rule.reference" semantic conventions. It represents the reference -// URL to additional information about the rule used to generate this event. -func SecurityRuleReference(val string) attribute.KeyValue { - return SecurityRuleReferenceKey.String(val) -} - -// SecurityRuleRulesetName returns an attribute KeyValue conforming to the -// "security_rule.ruleset.name" semantic conventions. 
It represents the name of -// the ruleset, policy, group, or parent category in which the rule used to -// generate this event is a member. -func SecurityRuleRulesetName(val string) attribute.KeyValue { - return SecurityRuleRulesetNameKey.String(val) -} - -// SecurityRuleUUID returns an attribute KeyValue conforming to the -// "security_rule.uuid" semantic conventions. It represents a rule ID that is -// unique within the scope of a set or group of agents, observers, or other -// entities using the rule for detection of this event. -func SecurityRuleUUID(val string) attribute.KeyValue { - return SecurityRuleUUIDKey.String(val) -} - -// SecurityRuleVersion returns an attribute KeyValue conforming to the -// "security_rule.version" semantic conventions. It represents the version / -// revision of the rule being used for analysis. -func SecurityRuleVersion(val string) attribute.KeyValue { - return SecurityRuleVersionKey.String(val) -} - -// Namespace: server -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.address` SHOULD represent the server address behind any - // intermediaries, for example proxies, if it's available. - ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" semantic - // conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through an - // intermediary, `server.port` SHOULD represent the server port behind any - // intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the "server.address" -// semantic conventions. It represents the server domain name if available -// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// Namespace: service -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID of - // the service instance. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "627cc493-f310-47de-96bd-71410b7dec09" - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to - // distinguish instances of the same service that exist at the same time (e.g. - // instances of a horizontally scaled - // service). 
- // - // Implementations, such as SDKs, are recommended to generate a random Version 1 - // or Version 4 [RFC - // 4122] UUID, but are free to use an inherent unique ID as - // the source of - // this value if stability is desirable. In that case, the ID SHOULD be used as - // source of a UUID Version 5 and - // SHOULD use the following UUID as the namespace: - // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. - // - // UUIDs are typically recommended, as only an opaque value for the purposes of - // identifying a service instance is - // needed. Similar to what can be seen in the man page for the - // [`/etc/machine-id`] file, the underlying - // data, such as pod name and namespace should be treated as confidential, being - // the user's choice to expose it - // or not via another resource attribute. - // - // For applications running behind an application server (like unicorn), we do - // not recommend using one identifier - // for all processes participating in the application. Instead, it's recommended - // each division (e.g. a worker - // thread in unicorn) to have its own instance.id. - // - // It's not recommended for a Collector to set `service.instance.id` if it can't - // unambiguously determine the - // service instance that is generating that telemetry. For instance, creating an - // UUID based on `pod.name` will - // likely be wrong, as the Collector might not know from which container within - // that pod the telemetry originated. - // However, Collectors can set the `service.instance.id` if they can - // unambiguously determine the service instance - // for that telemetry. This is typically the case for scraping receivers, as - // they know the target address and - // port. - // - // [RFC - // 4122]: https://www.ietf.org/rfc/rfc4122.txt - // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNameKey is the attribute Key conforming to the "service.name" semantic - // conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "shoppingcart" - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. - // If `process.executable.name` is not available, the value MUST be set to - // `unknown_service`. - // - // [`process.executable.name`]: process.md - ServiceNameKey = attribute.Key("service.name") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Shop" - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace - // defined (so the empty/unspecified namespace is simply one more valid - // namespace). Zero-length namespace string is assumed equal to unspecified - // namespace. 
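Reviewer note: the service.instance.id guidance above recommends deriving a stable Version 5 UUID from an inherent unique ID (such as /etc/machine-id), using `4d63009a-8d0f-11ee-aad7-4c796ed8e320` as the namespace. A sketch of that derivation, assuming github.com/google/uuid as the helper library (it is not a dependency of this module):

```go
package example

import (
	"os"
	"strings"

	"github.com/google/uuid" // assumed helper library, not part of this module

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// instanceIDNamespace is the namespace UUID the semantic conventions reserve
// for deriving service.instance.id from a pre-existing unique identifier.
var instanceIDNamespace = uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")

// serviceInstanceID derives a Version 5 UUID from /etc/machine-id so the value
// stays stable across restarts, falling back to a random UUID otherwise.
func serviceInstanceID() attribute.KeyValue {
	if raw, err := os.ReadFile("/etc/machine-id"); err == nil {
		// uuid.NewSHA1 produces a Version 5 UUID per RFC 4122.
		id := uuid.NewSHA1(instanceIDNamespace, []byte(strings.TrimSpace(string(raw))))
		return semconv.ServiceInstanceID(id.String())
	}
	return semconv.ServiceInstanceID(uuid.NewString())
}
```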
- ServiceNamespaceKey = attribute.Key("service.namespace") - - // ServiceVersionKey is the attribute Key conforming to the "service.version" - // semantic conventions. It represents the version string of the service API or - // implementation. The format is not defined by these conventions. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "2.0.0", "a01dbef8a" - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of the -// service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceName returns an attribute KeyValue conforming to the "service.name" -// semantic conventions. It represents the logical name of the service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// Namespace: session -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" semantic - // conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 00112233-4455-6677-8899-aabbccddeeff - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} - -// Namespace: signalr -const ( - // SignalRConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the signalR - // HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "app_shutdown", "timeout" - SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalRTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. It represents the - // [SignalR transport type]. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "web_sockets", "long_polling" - // - // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md - SignalRTransportKey = attribute.Key("signalr.transport") -) - -// Enum values for signalr.connection.status -var ( - // The connection was closed normally. - // Stability: stable - SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout. - // Stability: stable - SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down. - // Stability: stable - SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") -) - -// Enum values for signalr.transport -var ( - // ServerSentEvents protocol - // Stability: stable - SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") - // LongPolling protocol - // Stability: stable - SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") - // WebSockets protocol - // Stability: stable - SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") -) - -// Namespace: source -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix domain - // socket name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" - // Note: When observed from the destination side, and when communicating through - // an intermediary, `source.address` SHOULD represent the source address behind - // any intermediaries, for example proxies, if it's available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" semantic - // conventions. It represents the source port number. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the "source.address" -// semantic conventions. It represents the source address - domain name if -// available without reverse DNS lookup; otherwise, IP address or Unix domain -// socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number. -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Namespace: system -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // deprecated, use `cpu.logical_number` instead. - // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier. 
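Reviewer note: for the source.* attributes above, a server-side instrumentation would typically split the observed peer address into host and port; a sketch (how the caller obtains the net.Addr is an assumption):

```go
package example

import (
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// sourceAttrs converts a peer address such as "10.1.2.80:3389" into the
// source.address and source.port attributes defined above.
func sourceAttrs(addr net.Addr) []attribute.KeyValue {
	host, portStr, err := net.SplitHostPort(addr.String())
	if err != nil {
		// Unix domain sockets and similar addresses have no port component.
		return []attribute.KeyValue{semconv.SourceAddress(addr.String())}
	}
	attrs := []attribute.KeyValue{semconv.SourceAddress(host)}
	if port, err := strconv.Atoi(portStr); err == nil {
		attrs = append(attrs, semconv.SourcePort(port))
	}
	return attrs
}
```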
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "(identifier)" - SystemDeviceKey = attribute.Key("system.device") - - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the filesystem - // mode. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "rw, ro" - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/mnt/data" - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the filesystem - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "used" - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the filesystem - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ext4" - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") - - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free", "cached" - SystemMemoryStateKey = attribute.Key("system.memory.state") - - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "in" - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory paging - // state. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "free" - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory paging - // type. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "minor" - SystemPagingTypeKey = attribute.Key("system.paging.type") - - // SystemProcessStatusKey is the attribute Key conforming to the - // "system.process.status" semantic conventions. It represents the process - // state, e.g., [Linux Process State Codes]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "running" - // - // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES - SystemProcessStatusKey = attribute.Key("system.process.status") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. 
It represents the -// deprecated, use `cpu.logical_number` instead. -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// SystemDevice returns an attribute KeyValue conforming to the "system.device" -// semantic conventions. It represents the device identifier. -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. It represents the filesystem -// mode. -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the -// "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path. -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Enum values for system.filesystem.state -var ( - // used - // Stability: development - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - // Stability: development - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - // Stability: development - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -// Enum values for system.filesystem.type -var ( - // fat32 - // Stability: development - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - // Stability: development - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - // Stability: development - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - // Stability: development - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - // Stability: development - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - // Stability: development - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// Enum values for system.memory.state -var ( - // used - // Stability: development - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - // Stability: development - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // Deprecated: Removed, report shared memory usage with - // `metric.system.memory.shared` metric. 
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - // Stability: development - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - // Stability: development - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Enum values for system.paging.direction -var ( - // in - // Stability: development - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - // Stability: development - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -// Enum values for system.paging.state -var ( - // used - // Stability: development - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - // Stability: development - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -// Enum values for system.paging.type -var ( - // major - // Stability: development - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - // Stability: development - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Enum values for system.process.status -var ( - // running - // Stability: development - SystemProcessStatusRunning = SystemProcessStatusKey.String("running") - // sleeping - // Stability: development - SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") - // stopped - // Stability: development - SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") - // defunct - // Stability: development - SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") -) - -// Namespace: telemetry -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of the - // auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "parts-unlimited-java" - // Note: Official auto instrumentation agents and distributions SHOULD set the - // `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the version - // string of the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2.3" - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") - - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the language of - // the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "opentelemetry" - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to - // `opentelemetry`. 
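Reviewer note: the system.memory.state values above are intended as attribute values on memory metrics rather than standalone data. A sketch using the otel metric API, with the instrument name and sample values as placeholders:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// reportMemory records a memory usage sample split by system.memory.state.
func reportMemory(ctx context.Context, usedBytes, freeBytes int64) error {
	meter := otel.Meter("example")
	gauge, err := meter.Int64Gauge("system.memory.usage", metric.WithUnit("By"))
	if err != nil {
		return err
	}
	gauge.Record(ctx, usedBytes, metric.WithAttributes(semconv.SystemMemoryStateUsed))
	gauge.Record(ctx, freeBytes, metric.WithAttributes(semconv.SystemMemoryStateFree))
	return nil
}
```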
- // If another SDK, like a fork or a vendor-provided implementation, is used, - // this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module name of - // this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "1.2.3" - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version string -// of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// Enum values for telemetry.sdk.language -var ( - // cpp - // Stability: stable - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - // Stability: stable - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - // Stability: stable - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - // Stability: stable - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - // Stability: stable - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - // Stability: stable - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - // Stability: stable - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - // Stability: stable - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - // Stability: stable - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - // Stability: stable - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - // Stability: stable - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - // Stability: stable - TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") -) - -// Namespace: test -const ( - // TestCaseNameKey is the attribute Key conforming to the "test.case.name" - // semantic conventions. 
It represents the fully qualified human readable name - // of the [test case]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", - // "ExampleTestCase1_test1" - // - // [test case]: https://wikipedia.org/wiki/Test_case - TestCaseNameKey = attribute.Key("test.case.name") - - // TestCaseResultStatusKey is the attribute Key conforming to the - // "test.case.result.status" semantic conventions. It represents the status of - // the actual test case result from test execution. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "pass", "fail" - TestCaseResultStatusKey = attribute.Key("test.case.result.status") - - // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" - // semantic conventions. It represents the human readable name of a [test suite] - // . - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TestSuite1" - // - // [test suite]: https://wikipedia.org/wiki/Test_suite - TestSuiteNameKey = attribute.Key("test.suite.name") - - // TestSuiteRunStatusKey is the attribute Key conforming to the - // "test.suite.run.status" semantic conventions. It represents the status of the - // test suite run. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "success", "failure", "skipped", "aborted", "timed_out", - // "in_progress" - TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") -) - -// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" -// semantic conventions. It represents the fully qualified human readable name of -// the [test case]. -// -// [test case]: https://wikipedia.org/wiki/Test_case -func TestCaseName(val string) attribute.KeyValue { - return TestCaseNameKey.String(val) -} - -// TestSuiteName returns an attribute KeyValue conforming to the -// "test.suite.name" semantic conventions. It represents the human readable name -// of a [test suite]. -// -// [test suite]: https://wikipedia.org/wiki/Test_suite -func TestSuiteName(val string) attribute.KeyValue { - return TestSuiteNameKey.String(val) -} - -// Enum values for test.case.result.status -var ( - // pass - // Stability: development - TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") - // fail - // Stability: development - TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") -) - -// Enum values for test.suite.run.status -var ( - // success - // Stability: development - TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") - // failure - // Stability: development - TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") - // skipped - // Stability: development - TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") - // aborted - // Stability: development - TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") - // timed_out - // Stability: development - TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") - // in_progress - // Stability: development - TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") -) - -// Namespace: thread -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed to OS - // thread ID). 
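Reviewer note: the test.* attributes above map naturally onto one span per test case; a sketch, with the tracer name and pass/fail mapping as placeholders:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0" // illustrative version
)

// runTestCase wraps a single test case in a span carrying test.case.name and
// test.case.result.status.
func runTestCase(ctx context.Context, name string, fn func(context.Context) error) error {
	ctx, span := otel.Tracer("example").Start(ctx, name)
	defer span.End()

	span.SetAttributes(semconv.TestCaseName(name))

	err := fn(ctx)
	result := semconv.TestCaseResultStatusPass
	if err != nil {
		result = semconv.TestCaseResultStatusFail
	}
	span.SetAttributes(result)
	return err
}
```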
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic - // conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: main - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic -// conventions. It represents the current "managed" thread ID (as opposed to OS -// thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Namespace: tls -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic - // conventions. It represents the string indicating the [cipher] used during the - // current connection. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" - // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` - // of the [registered TLS Cipher Suits]. - // - // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 - // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the client. This is usually - // mutually-exclusive of `client.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // client. This is usually mutually-exclusive of `client.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. 
It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the client. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the date/Time - // indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the array - // of ciphers offered by the client during the client hello. 
- // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the given - // cipher, when applicable. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "secp256r1" - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the "tls.established" - // semantic conventions. It represents the boolean flag indicating if the TLS - // negotiation was successful and transitioned to an encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" - // semantic conventions. It represents the string indicating the protocol being - // tunneled. Per the values in the [IANA registry], this string should be lower - // case. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "http/1.1" - // - // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" - // semantic conventions. It represents the normalized lowercase protocol name - // parsed from original string of the negotiated [SSL/TLS protocol version]. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric part - // of the version parsed from the original string of the negotiated - // [SSL/TLS protocol version]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1.2", "3" - // - // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic - // conventions. It represents the boolean flag indicating if this TLS connection - // was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: true - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the PEM-encoded - // stand-alone certificate offered by the server. This is usually - // mutually-exclusive of `server.certificate_chain` since this value also exists - // in that list. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII..." 
- TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the array - // of PEM-encoded certificates that make up the certificate chain offered by the - // server. This is usually mutually-exclusive of `server.certificate` since that - // value should be the first certificate in the chain. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "MII...", "MI..." - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the certificate - // fingerprint using the MD5 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the certificate - // fingerprint using the SHA1 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the certificate - // fingerprint using the SHA256 digest of DER-encoded version of certificate - // offered by the server. For consistency with other hash values, this value - // should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" - // semantic conventions. It represents the distinguished name of [subject] of - // the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" - // - // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" - // semantic conventions. It represents a hash that identifies servers based on - // how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "d4e5b18d6b55c71272893221c96ba240" - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "2021-01-01T00:00:00.000Z" - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the date/Time - // indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "1970-01-01T00:00:00.000Z" - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the distinguished - // name of subject of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the [cipher] used -// during the current connection. -// -// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also exists -// in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// client. This is usually mutually-exclusive of `client.certificate` since that -// value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the client. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. 
For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" -// semantic conventions. It represents a hash that identifies clients based on -// how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic -// conventions. It represents the string indicating the curve used for the given -// cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string indicating -// the protocol being tunneled. Per the values in the [IANA registry], this -// string should be lower case. -// -// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. 
It represents the numeric part of -// the version parsed from the original string of the negotiated -// [SSL/TLS protocol version]. -// -// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also exists -// in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by the -// server. This is usually mutually-exclusive of `server.certificate` since that -// value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate offered -// by the server. For consistency with other hash values, this value should be -// formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished name -// of [subject] of the issuer of the x.509 certificate presented by the client. -// -// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. 
It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Enum values for tls.protocol.name -var ( - // ssl - // Stability: development - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - // Stability: development - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// Namespace: url -const ( - // URLDomainKey is the attribute Key conforming to the "url.domain" semantic - // conventions. It represents the domain extracted from the `url.full`, such as - // "opentelemetry.io". - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", - // "[1080:0:0:0:8:800:200C:417A]" - // Note: In some cases a URL may refer to an IP and/or port directly, without a - // domain name. In this case, the IP address would go to the domain field. If - // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` - // and `]` characters should also be captured in the domain field. - // - // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 - URLDomainKey = attribute.Key("url.domain") - - // URLExtensionKey is the attribute Key conforming to the "url.extension" - // semantic conventions. It represents the file extension extracted from the - // `url.full`, excluding the leading dot. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "png", "gz" - // Note: The file extension is only set if it exists, as not every url has a - // file extension. When the file name has multiple extensions `example.tar.gz`, - // only the last one should be captured `gz`, not `tar.gz`. - URLExtensionKey = attribute.Key("url.extension") - - // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic - // conventions. It represents the [URI fragment] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "SemConv" - // - // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network resource - // according to [RFC3986]. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the fragment - // is not transmitted over HTTP, but if it is known, it SHOULD be included - // nevertheless. - // - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. - // In such case username and password SHOULD be redacted and attribute's value - // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. - // - // `url.full` SHOULD capture the absolute URL when it is available (or can be - // reconstructed). - // - // Sensitive content provided in `url.full` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the - // value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `https://www.example.com/path?color=blue&sig=REDACTED`. - // - // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLFullKey = attribute.Key("url.full") - - // URLOriginalKey is the attribute Key conforming to the "url.original" semantic - // conventions. It represents the unmodified original URL as seen in the event - // source. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", - // "search?q=OpenTelemetry" - // Note: In network monitoring, the observed URL may be a full URL, whereas in - // access logs, the URL is often just represented as a path. This field is meant - // to represent the URL as it was observed, complete or not. - // `url.original` might contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case password and - // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. - URLOriginalKey = attribute.Key("url.original") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI path] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "/search" - // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when - // instrumentations can identify it. - // - // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 - URLPathKey = attribute.Key("url.path") - - // URLPortKey is the attribute Key conforming to the "url.port" semantic - // conventions. It represents the port extracted from the `url.full`. 
- // - // Type: int - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: 443 - URLPortKey = attribute.Key("url.port") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI query] component. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "q=OpenTelemetry" - // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when - // instrumentations can identify it. - // - // - // Query string values for the following keys SHOULD be redacted by default and - // replaced by the value `REDACTED`: - // - // - [`AWSAccessKeyId`] - // - [`Signature`] - // - [`sig`] - // - [`X-Goog-Signature`] - // - // This list is subject to change over time. - // - // When a query string value is redacted, the query string key SHOULD still be - // preserved, e.g. - // `q=OpenTelemetry&sig=REDACTED`. - // - // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 - // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth - // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token - // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls - URLQueryKey = attribute.Key("url.query") - - // URLRegisteredDomainKey is the attribute Key conforming to the - // "url.registered_domain" semantic conventions. It represents the highest - // registered url domain, stripped of the subdomain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "example.com", "foo.co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // For example, the registered domain for `foo.example.com` is `example.com`. - // Trying to approximate this by simply taking the last two labels will not work - // well for TLDs such as `co.uk`. - // - // [public suffix list]: https://publicsuffix.org/ - URLRegisteredDomainKey = attribute.Key("url.registered_domain") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic - // conventions. It represents the [URI scheme] component identifying the used - // protocol. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "https", "ftp", "telnet" - // - // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 - URLSchemeKey = attribute.Key("url.scheme") - - // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" - // semantic conventions. It represents the subdomain portion of a fully - // qualified domain name includes all of the names except the host name under - // the registered_domain. In a partially qualified domain, or if the - // qualification level of the full name cannot be determined, subdomain contains - // all of the names below the registered domain. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "east", "sub2.sub1" - // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the - // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the - // subdomain field should contain `sub2.sub1`, with no trailing period. 
- URLSubdomainKey = attribute.Key("url.subdomain") - - // URLTemplateKey is the attribute Key conforming to the "url.template" semantic - // conventions. It represents the low-cardinality template of an - // [absolute path reference]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "/users/{id}", "/users/:id", "/users?id={id}" - // - // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 - URLTemplateKey = attribute.Key("url.template") - - // URLTopLevelDomainKey is the attribute Key conforming to the - // "url.top_level_domain" semantic conventions. It represents the effective top - // level domain (eTLD), also known as the domain suffix, is the last part of the - // domain name. For example, the top level domain for example.com is `com`. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "com", "co.uk" - // Note: This value can be determined precisely with the [public suffix list]. - // - // [public suffix list]: https://publicsuffix.org/ - URLTopLevelDomainKey = attribute.Key("url.top_level_domain") -) - -// URLDomain returns an attribute KeyValue conforming to the "url.domain" -// semantic conventions. It represents the domain extracted from the `url.full`, -// such as "opentelemetry.io". -func URLDomain(val string) attribute.KeyValue { - return URLDomainKey.String(val) -} - -// URLExtension returns an attribute KeyValue conforming to the "url.extension" -// semantic conventions. It represents the file extension extracted from the -// `url.full`, excluding the leading dot. -func URLExtension(val string) attribute.KeyValue { - return URLExtensionKey.String(val) -} - -// URLFragment returns an attribute KeyValue conforming to the "url.fragment" -// semantic conventions. It represents the [URI fragment] component. -// -// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" semantic -// conventions. It represents the absolute URL describing a network resource -// according to [RFC3986]. -// -// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLOriginal returns an attribute KeyValue conforming to the "url.original" -// semantic conventions. It represents the unmodified original URL as seen in the -// event source. -func URLOriginal(val string) attribute.KeyValue { - return URLOriginalKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" semantic -// conventions. It represents the [URI path] component. -// -// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLPort returns an attribute KeyValue conforming to the "url.port" semantic -// conventions. It represents the port extracted from the `url.full`. -func URLPort(val int) attribute.KeyValue { - return URLPortKey.Int(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic -// conventions. It represents the [URI query] component. 
-// -// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLRegisteredDomain returns an attribute KeyValue conforming to the -// "url.registered_domain" semantic conventions. It represents the highest -// registered url domain, stripped of the subdomain. -func URLRegisteredDomain(val string) attribute.KeyValue { - return URLRegisteredDomainKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI scheme] component identifying the -// used protocol. -// -// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" -// semantic conventions. It represents the subdomain portion of a fully qualified -// domain name includes all of the names except the host name under the -// registered_domain. In a partially qualified domain, or if the qualification -// level of the full name cannot be determined, subdomain contains all of the -// names below the registered domain. -func URLSubdomain(val string) attribute.KeyValue { - return URLSubdomainKey.String(val) -} - -// URLTemplate returns an attribute KeyValue conforming to the "url.template" -// semantic conventions. It represents the low-cardinality template of an -// [absolute path reference]. -// -// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 -func URLTemplate(val string) attribute.KeyValue { - return URLTemplateKey.String(val) -} - -// URLTopLevelDomain returns an attribute KeyValue conforming to the -// "url.top_level_domain" semantic conventions. It represents the effective top -// level domain (eTLD), also known as the domain suffix, is the last part of the -// domain name. For example, the top level domain for example.com is `com`. -func URLTopLevelDomain(val string) attribute.KeyValue { - return URLTopLevelDomainKey.String(val) -} - -// Namespace: user -const ( - // UserEmailKey is the attribute Key conforming to the "user.email" semantic - // conventions. It represents the user email address. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein@example.com" - UserEmailKey = attribute.Key("user.email") - - // UserFullNameKey is the attribute Key conforming to the "user.full_name" - // semantic conventions. It represents the user's full name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Albert Einstein" - UserFullNameKey = attribute.Key("user.full_name") - - // UserHashKey is the attribute Key conforming to the "user.hash" semantic - // conventions. It represents the unique user hash to correlate information for - // a user in anonymized form. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" - // Note: Useful if `user.id` or `user.name` contain confidential information and - // cannot be used. - UserHashKey = attribute.Key("user.hash") - - // UserIDKey is the attribute Key conforming to the "user.id" semantic - // conventions. It represents the unique identifier of the user. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" - UserIDKey = attribute.Key("user.id") - - // UserNameKey is the attribute Key conforming to the "user.name" semantic - // conventions. It represents the short name or login/username of the user. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "a.einstein" - UserNameKey = attribute.Key("user.name") - - // UserRolesKey is the attribute Key conforming to the "user.roles" semantic - // conventions. It represents the array of user roles at the time of the event. - // - // Type: string[] - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "admin", "reporting_user" - UserRolesKey = attribute.Key("user.roles") -) - -// UserEmail returns an attribute KeyValue conforming to the "user.email" -// semantic conventions. It represents the user email address. -func UserEmail(val string) attribute.KeyValue { - return UserEmailKey.String(val) -} - -// UserFullName returns an attribute KeyValue conforming to the "user.full_name" -// semantic conventions. It represents the user's full name. -func UserFullName(val string) attribute.KeyValue { - return UserFullNameKey.String(val) -} - -// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic -// conventions. It represents the unique user hash to correlate information for a -// user in anonymized form. -func UserHash(val string) attribute.KeyValue { - return UserHashKey.String(val) -} - -// UserID returns an attribute KeyValue conforming to the "user.id" semantic -// conventions. It represents the unique identifier of the user. -func UserID(val string) attribute.KeyValue { - return UserIDKey.String(val) -} - -// UserName returns an attribute KeyValue conforming to the "user.name" semantic -// conventions. It represents the short name or login/username of the user. -func UserName(val string) attribute.KeyValue { - return UserNameKey.String(val) -} - -// UserRoles returns an attribute KeyValue conforming to the "user.roles" -// semantic conventions. It represents the array of user roles at the time of the -// event. -func UserRoles(val ...string) attribute.KeyValue { - return UserRolesKey.StringSlice(val) -} - -// Namespace: user_agent -const ( - // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" - // semantic conventions. It represents the name of the user-agent extracted from - // original. Usually refers to the browser's name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Safari", "YourApp" - // Note: [Example] of extracting browser's name from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant name SHOULD be selected. In such a scenario it should align with - // `user_agent.version` - // - // [Example]: https://www.whatsmyua.info - UserAgentNameKey = attribute.Key("user_agent.name") - - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of the - // [HTTP User-Agent] header sent by the client. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Stable - // - // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 - // grpc-java-okhttp/1.27.2" - // - // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent - UserAgentOriginalKey = attribute.Key("user_agent.original") - - // UserAgentOSNameKey is the attribute Key conforming to the - // "user_agent.os.name" semantic conventions. It represents the human readable - // operating system name. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "iOS", "Android", "Ubuntu" - // Note: For mapping user agent strings to OS names, libraries such as - // [ua-parser] can be utilized. - // - // [ua-parser]: https://github.com/ua-parser - UserAgentOSNameKey = attribute.Key("user_agent.os.name") - - // UserAgentOSVersionKey is the attribute Key conforming to the - // "user_agent.os.version" semantic conventions. It represents the version - // string of the operating system as defined in [Version Attributes]. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.2.1", "18.04.1" - // Note: For mapping user agent strings to OS versions, libraries such as - // [ua-parser] can be utilized. - // - // [Version Attributes]: /docs/resource/README.md#version-attributes - // [ua-parser]: https://github.com/ua-parser - UserAgentOSVersionKey = attribute.Key("user_agent.os.version") - - // UserAgentSyntheticTypeKey is the attribute Key conforming to the - // "user_agent.synthetic.type" semantic conventions. It represents the specifies - // the category of synthetic traffic, such as tests or bots. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // Note: This attribute MAY be derived from the contents of the - // `user_agent.original` attribute. Components that populate the attribute are - // responsible for determining what they consider to be synthetic bot or test - // traffic. This attribute can either be set for self-identification purposes, - // or on telemetry detected to be generated as a result of a synthetic request. - // This attribute is useful for distinguishing between genuine client traffic - // and synthetic traffic generated by bots or tests. - UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") - - // UserAgentVersionKey is the attribute Key conforming to the - // "user_agent.version" semantic conventions. It represents the version of the - // user-agent extracted from original. Usually refers to the browser's version. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "14.1.2", "1.0.0" - // Note: [Example] of extracting browser's version from original string. In the - // case of using a user-agent for non-browser products, such as microservices - // with multiple names/versions inside the `user_agent.original`, the most - // significant version SHOULD be selected. In such a scenario it should align - // with `user_agent.name` - // - // [Example]: https://www.whatsmyua.info - UserAgentVersionKey = attribute.Key("user_agent.version") -) - -// UserAgentName returns an attribute KeyValue conforming to the -// "user_agent.name" semantic conventions. 
It represents the name of the -// user-agent extracted from original. Usually refers to the browser's name. -func UserAgentName(val string) attribute.KeyValue { - return UserAgentNameKey.String(val) -} - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP User-Agent] header sent by the client. -// -// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// UserAgentOSName returns an attribute KeyValue conforming to the -// "user_agent.os.name" semantic conventions. It represents the human readable -// operating system name. -func UserAgentOSName(val string) attribute.KeyValue { - return UserAgentOSNameKey.String(val) -} - -// UserAgentOSVersion returns an attribute KeyValue conforming to the -// "user_agent.os.version" semantic conventions. It represents the version string -// of the operating system as defined in [Version Attributes]. -// -// [Version Attributes]: /docs/resource/README.md#version-attributes -func UserAgentOSVersion(val string) attribute.KeyValue { - return UserAgentOSVersionKey.String(val) -} - -// UserAgentVersion returns an attribute KeyValue conforming to the -// "user_agent.version" semantic conventions. It represents the version of the -// user-agent extracted from original. Usually refers to the browser's version. -func UserAgentVersion(val string) attribute.KeyValue { - return UserAgentVersionKey.String(val) -} - -// Enum values for user_agent.synthetic.type -var ( - // Bot source. - // Stability: development - UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") - // Synthetic test source. - // Stability: development - UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") -) - -// Namespace: vcs -const ( - // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" - // semantic conventions. It represents the ID of the change (pull request/merge - // request/changelist) if applicable. This is usually a unique (within - // repository) identifier generated by the VCS system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "123" - VCSChangeIDKey = attribute.Key("vcs.change.id") - - // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" - // semantic conventions. It represents the state of the change (pull - // request/merge request/changelist). - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "open", "closed", "merged" - VCSChangeStateKey = attribute.Key("vcs.change.state") - - // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" - // semantic conventions. It represents the human readable title of the change - // (pull request/merge request/changelist). This title is often a brief summary - // of the change and may get merged in to a ref as the commit summary. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update - // dependency" - VCSChangeTitleKey = attribute.Key("vcs.change.title") - - // VCSLineChangeTypeKey is the attribute Key conforming to the - // "vcs.line_change.type" semantic conventions. It represents the type of line - // change being measured on a branch or change. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "added", "removed" - VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") - - // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" - // semantic conventions. It represents the group owner within the version - // control system. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-org", "myteam", "business-unit" - VCSOwnerNameKey = attribute.Key("vcs.owner.name") - - // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" - // semantic conventions. It represents the name of the version control system - // provider. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "github", "gitlab", "gitea", "bitbucket" - VCSProviderNameKey = attribute.Key("vcs.provider.name") - - // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") - - // VCSRefBaseRevisionKey is the attribute Key conforming to the - // "vcs.ref.base.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. The - // revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.base.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") - - // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `base` refers to the starting point of a change. For example, `main` - // would be the base reference of type branch if you've created a new - // reference of type branch from it and created new commits. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") - - // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" - // semantic conventions. It represents the name of the [reference] such as - // **branch** or **tag** in the repository. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "my-feature-branch", "tag-1-test" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") - - // VCSRefHeadRevisionKey is the attribute Key conforming to the - // "vcs.ref.head.revision" semantic conventions. It represents the revision, - // literally [revised version], The revision most often refers to a commit - // object in Git, or a revision number in SVN. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", - // "main", "123", "HEAD" - // Note: `head` refers to where you are right now; the current reference at a - // given time.The revision can be a full [hash value (see - // glossary)], - // of the recorded change to a ref within a repository pointing to a - // commit [commit] object. It does - // not necessarily have to be a hash; it can simply define a [revision - // number] - // which is an integer that is monotonically increasing. In cases where - // it is identical to the `ref.head.name`, it SHOULD still be included. - // It is up to the implementer to decide which value to set as the - // revision based on the VCS system and situational context. - // - // [revised version]: https://www.merriam-webster.com/dictionary/revision - // [hash value (see - // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf - // [commit]: https://git-scm.com/docs/git-commit - // [revision - // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html - VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") - - // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" - // semantic conventions. It represents the type of the [reference] in the - // repository. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // Note: `head` refers to where you are right now; the current reference at a - // given time. - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") - - // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic - // conventions. It represents the type of the [reference] in the repository. 
- // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "branch", "tag" - // - // [reference]: https://git-scm.com/docs/gitglossary#def_ref - VCSRefTypeKey = attribute.Key("vcs.ref.type") - - // VCSRepositoryNameKey is the attribute Key conforming to the - // "vcs.repository.name" semantic conventions. It represents the human readable - // name of the repository. It SHOULD NOT include any additional identifier like - // Group/SubGroup in GitLab or organization in GitHub. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "semantic-conventions", "my-cool-repo" - // Note: Due to it only being the name, it can clash with forks of the same - // repository if collecting telemetry across multiple orgs or groups in - // the same backends. - VCSRepositoryNameKey = attribute.Key("vcs.repository.name") - - // VCSRepositoryURLFullKey is the attribute Key conforming to the - // "vcs.repository.url.full" semantic conventions. It represents the - // [canonical URL] of the repository providing the complete HTTP(S) address in - // order to locate and identify the repository through a browser. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: - // "https://github.com/opentelemetry/open-telemetry-collector-contrib", - // "https://gitlab.com/my-org/my-project/my-projects-project/repo" - // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include - // the `.git` extension. - // - // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. - VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") - - // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the - // "vcs.revision_delta.direction" semantic conventions. It represents the type - // of revision comparison. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "ahead", "behind" - VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") -) - -// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" -// semantic conventions. It represents the ID of the change (pull request/merge -// request/changelist) if applicable. This is usually a unique (within -// repository) identifier generated by the VCS system. -func VCSChangeID(val string) attribute.KeyValue { - return VCSChangeIDKey.String(val) -} - -// VCSChangeTitle returns an attribute KeyValue conforming to the -// "vcs.change.title" semantic conventions. It represents the human readable -// title of the change (pull request/merge request/changelist). This title is -// often a brief summary of the change and may get merged in to a ref as the -// commit summary. -func VCSChangeTitle(val string) attribute.KeyValue { - return VCSChangeTitleKey.String(val) -} - -// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" -// semantic conventions. It represents the group owner within the version control -// system. -func VCSOwnerName(val string) attribute.KeyValue { - return VCSOwnerNameKey.String(val) -} - -// VCSRefBaseName returns an attribute KeyValue conforming to the -// "vcs.ref.base.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. 
-// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefBaseName(val string) attribute.KeyValue { - return VCSRefBaseNameKey.String(val) -} - -// VCSRefBaseRevision returns an attribute KeyValue conforming to the -// "vcs.ref.base.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefBaseRevision(val string) attribute.KeyValue { - return VCSRefBaseRevisionKey.String(val) -} - -// VCSRefHeadName returns an attribute KeyValue conforming to the -// "vcs.ref.head.name" semantic conventions. It represents the name of the -// [reference] such as **branch** or **tag** in the repository. -// -// [reference]: https://git-scm.com/docs/gitglossary#def_ref -func VCSRefHeadName(val string) attribute.KeyValue { - return VCSRefHeadNameKey.String(val) -} - -// VCSRefHeadRevision returns an attribute KeyValue conforming to the -// "vcs.ref.head.revision" semantic conventions. It represents the revision, -// literally [revised version], The revision most often refers to a commit object -// in Git, or a revision number in SVN. -// -// [revised version]: https://www.merriam-webster.com/dictionary/revision -func VCSRefHeadRevision(val string) attribute.KeyValue { - return VCSRefHeadRevisionKey.String(val) -} - -// VCSRepositoryName returns an attribute KeyValue conforming to the -// "vcs.repository.name" semantic conventions. It represents the human readable -// name of the repository. It SHOULD NOT include any additional identifier like -// Group/SubGroup in GitLab or organization in GitHub. -func VCSRepositoryName(val string) attribute.KeyValue { - return VCSRepositoryNameKey.String(val) -} - -// VCSRepositoryURLFull returns an attribute KeyValue conforming to the -// "vcs.repository.url.full" semantic conventions. It represents the -// [canonical URL] of the repository providing the complete HTTP(S) address in -// order to locate and identify the repository through a browser. -// -// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. -func VCSRepositoryURLFull(val string) attribute.KeyValue { - return VCSRepositoryURLFullKey.String(val) -} - -// Enum values for vcs.change.state -var ( - // Open means the change is currently active and under review. It hasn't been - // merged into the target branch yet, and it's still possible to make changes or - // add comments. - // Stability: development - VCSChangeStateOpen = VCSChangeStateKey.String("open") - // WIP (work-in-progress, draft) means the change is still in progress and not - // yet ready for a full review. It might still undergo significant changes. - // Stability: development - VCSChangeStateWip = VCSChangeStateKey.String("wip") - // Closed means the merge request has been closed without merging. This can - // happen for various reasons, such as the changes being deemed unnecessary, the - // issue being resolved in another way, or the author deciding to withdraw the - // request. - // Stability: development - VCSChangeStateClosed = VCSChangeStateKey.String("closed") - // Merged indicates that the change has been successfully integrated into the - // target codebase. 
- // Stability: development - VCSChangeStateMerged = VCSChangeStateKey.String("merged") -) - -// Enum values for vcs.line_change.type -var ( - // How many lines were added. - // Stability: development - VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") - // How many lines were removed. - // Stability: development - VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") -) - -// Enum values for vcs.provider.name -var ( - // [GitHub] - // Stability: development - // - // [GitHub]: https://github.com - VCSProviderNameGithub = VCSProviderNameKey.String("github") - // [GitLab] - // Stability: development - // - // [GitLab]: https://gitlab.com - VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") - // Deprecated: Replaced by `gitea`. - VCSProviderNameGittea = VCSProviderNameKey.String("gittea") - // [Gitea] - // Stability: development - // - // [Gitea]: https://gitea.io - VCSProviderNameGitea = VCSProviderNameKey.String("gitea") - // [Bitbucket] - // Stability: development - // - // [Bitbucket]: https://bitbucket.org - VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") -) - -// Enum values for vcs.ref.base.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") -) - -// Enum values for vcs.ref.head.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") -) - -// Enum values for vcs.ref.type -var ( - // [branch] - // Stability: development - // - // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch - VCSRefTypeBranch = VCSRefTypeKey.String("branch") - // [tag] - // Stability: development - // - // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag - VCSRefTypeTag = VCSRefTypeKey.String("tag") -) - -// Enum values for vcs.revision_delta.direction -var ( - // How many revisions the change is behind the target ref. - // Stability: development - VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") - // How many revisions the change is ahead of the target ref. - // Stability: development - VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") -) - -// Namespace: webengine -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the additional - // description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final" - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. 
- // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "WildFly" - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of the - // web engine. - // - // Type: string - // RequirementLevel: Recommended - // Stability: Development - // - // Examples: "21.0.0" - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the "webengine.name" -// semantic conventions. It represents the name of the web engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the web -// engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go deleted file mode 100644 index 2c5c7ebd..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.34.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go deleted file mode 100644 index 19bf0224..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" - -import ( - "fmt" - "reflect" - - "go.opentelemetry.io/otel/attribute" -) - -// ErrorType returns an [attribute.KeyValue] identifying the error type of err. -func ErrorType(err error) attribute.KeyValue { - if err == nil { - return ErrorTypeOther - } - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. 
- value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) - } - - if value == "" { - return ErrorTypeOther - } - return ErrorTypeKey.String(value) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go deleted file mode 100644 index 88a998f1..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go deleted file mode 100644 index 3c23d459..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.34.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go index 666bded4..267979c0 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go @@ -4,28 +4,53 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" import ( - "fmt" "reflect" "go.opentelemetry.io/otel/attribute" ) // ErrorType returns an [attribute.KeyValue] identifying the error type of err. +// +// If err is nil, the returned attribute has the default value +// [ErrorTypeOther]. +// +// If err's type has the method +// +// ErrorType() string +// +// then the returned attribute has the value of err.ErrorType(). Otherwise, the +// returned attribute has a value derived from the concrete type of err. +// +// The key of the returned attribute is [ErrorTypeKey]. func ErrorType(err error) attribute.KeyValue { if err == nil { return ErrorTypeOther } - t := reflect.TypeOf(err) - var value string - if t.PkgPath() == "" && t.Name() == "" { - // Likely a builtin type. - value = t.String() - } else { - value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + + return ErrorTypeKey.String(errorType(err)) +} + +func errorType(err error) string { + var s string + if et, ok := err.(interface{ ErrorType() string }); ok { + // Prioritize the ErrorType method if available. + s = et.ErrorType() } + if s == "" { + // Fallback to reflection if the ErrorType method is not supported or + // returns an empty value. - if value == "" { - return ErrorTypeOther + t := reflect.TypeOf(err) + pkg, name := t.PkgPath(), t.Name() + if pkg != "" && name != "" { + s = pkg + "." + name + } else { + // The type has no package path or name (predeclared, not-defined, + // or alias for a not-defined type). + // + // This is not guaranteed to be unique, but is a best effort. 
+ s = t.String() + } } - return ErrorTypeKey.String(value) + return s } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go new file mode 100644 index 00000000..a0ddf652 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go @@ -0,0 +1,1728 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "http" namespace. +package httpconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ConnectionStateAttr is an attribute conforming to the http.connection.state +// semantic conventions. It represents the state of the HTTP connection in the +// HTTP connection pool. +type ConnectionStateAttr string + +var ( + // ConnectionStateActive is the active state. + ConnectionStateActive ConnectionStateAttr = "active" + // ConnectionStateIdle is the idle state. + ConnectionStateIdle ConnectionStateAttr = "idle" +) + +// RequestMethodAttr is an attribute conforming to the http.request.method +// semantic conventions. It represents the HTTP request method. +type RequestMethodAttr string + +var ( + // RequestMethodConnect is the CONNECT method. + RequestMethodConnect RequestMethodAttr = "CONNECT" + // RequestMethodDelete is the DELETE method. + RequestMethodDelete RequestMethodAttr = "DELETE" + // RequestMethodGet is the GET method. + RequestMethodGet RequestMethodAttr = "GET" + // RequestMethodHead is the HEAD method. + RequestMethodHead RequestMethodAttr = "HEAD" + // RequestMethodOptions is the OPTIONS method. + RequestMethodOptions RequestMethodAttr = "OPTIONS" + // RequestMethodPatch is the PATCH method. + RequestMethodPatch RequestMethodAttr = "PATCH" + // RequestMethodPost is the POST method. + RequestMethodPost RequestMethodAttr = "POST" + // RequestMethodPut is the PUT method. + RequestMethodPut RequestMethodAttr = "PUT" + // RequestMethodTrace is the TRACE method. + RequestMethodTrace RequestMethodAttr = "TRACE" + // RequestMethodOther is the any HTTP method that the instrumentation has no + // prior knowledge of. + RequestMethodOther RequestMethodAttr = "_OTHER" +) + +// UserAgentSyntheticTypeAttr is an attribute conforming to the +// user_agent.synthetic.type semantic conventions. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +type UserAgentSyntheticTypeAttr string + +var ( + // UserAgentSyntheticTypeBot is the bot source. + UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot" + // UserAgentSyntheticTypeTest is the synthetic test source. 
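Aside on the error_type.go change above: the v1.37.0 `ErrorType` helper now prefers a user-supplied `ErrorType() string` method and only falls back to reflection when that method is missing or returns an empty string. A minimal sketch of the difference, assuming a hypothetical `timeoutError` type:

```go
package main

import (
	"errors"
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// timeoutError is a hypothetical error type; its ErrorType method is now
// consulted before the reflection-based fallback.
type timeoutError struct{ inner error }

func (e timeoutError) Error() string     { return e.inner.Error() }
func (e timeoutError) ErrorType() string { return "timeout" }

func main() {
	// Custom classification via the ErrorType() method.
	kv := semconv.ErrorType(timeoutError{errors.New("deadline exceeded")})
	fmt.Println(kv.Key, kv.Value.AsString()) // error.type timeout

	// Reflection fallback for types without ErrorType().
	kv = semconv.ErrorType(errors.New("boom"))
	fmt.Println(kv.Key, kv.Value.AsString()) // error.type *errors.errorString
}
```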
+ UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test" +) + +// ClientActiveRequests is an instrument used to record metric values conforming +// to the "http.client.active_requests" semantic conventions. It represents the +// number of active HTTP requests. +type ClientActiveRequests struct { + metric.Int64UpDownCounter +} + +var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), +} + +// NewClientActiveRequests returns a new ClientActiveRequests instrument. +func NewClientActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newClientActiveRequestsOpts + } else { + opt = append(opt, newClientActiveRequestsOpts...) + } + + i, err := m.Int64UpDownCounter( + "http.client.active_requests", + opt..., + ) + if err != nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ClientActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientActiveRequests) Name() string { + return "http.client.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ClientActiveRequests) Description() string { + return "Number of active HTTP requests." +} + +// Add adds incr to the existing count for attrs. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +func (m ClientActiveRequests) Add( + ctx context.Context, + incr int64, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. 
+// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrRequestMethod returns an optional attribute for the "http.request.method" +// semantic convention. It represents the HTTP request method. +func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue { + return attribute.String("http.request.method", string(val)) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientConnectionDuration is an instrument used to record metric values +// conforming to the "http.client.connection.duration" semantic conventions. It +// represents the duration of the successfully established outbound HTTP +// connections. +type ClientConnectionDuration struct { + metric.Float64Histogram +} + +var newClientConnectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), +} + +// NewClientConnectionDuration returns a new ClientConnectionDuration instrument. +func NewClientConnectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientConnectionDurationOpts + } else { + opt = append(opt, newClientConnectionDurationOpts...) + } + + i, err := m.Float64Histogram( + "http.client.connection.duration", + opt..., + ) + if err != nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, err + } + return ClientConnectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionDuration) Name() string { + return "http.client.connection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionDuration) Description() string { + return "The duration of the successfully established outbound HTTP connections." +} + +// Record records val to the current distribution for attrs. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. 
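For context on how these generated httpconv instruments are meant to be used: `NewClientActiveRequests` wires up `http.client.active_requests` with its conventional description and unit, `Add` attaches the required `server.address`/`server.port` attributes per call, and `AddSet` takes a pre-built `attribute.Set` so hot paths avoid reassembling attributes on every increment. A rough sketch (meter name and target host are placeholders, error handling elided):

```go
package httpclient

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

// Illustrative only: the instrument and attribute set are built once and reused.
var activeReqs, _ = httpconv.NewClientActiveRequests(otel.Meter("example/httpclient"))

var target = attribute.NewSet(
	attribute.String("server.address", "api.example.com"),
	attribute.Int("server.port", 443),
)

// trackRequest bumps http.client.active_requests and returns a func that
// undoes the increment when the request finishes.
func trackRequest(ctx context.Context) func() {
	activeReqs.AddSet(ctx, 1, target)
	return func() { activeReqs.AddSet(ctx, -1, target) }
}
```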
+func (m ClientConnectionDuration) Record( + ctx context.Context, + val float64, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientOpenConnections is an instrument used to record metric values conforming +// to the "http.client.open_connections" semantic conventions. It represents the +// number of outbound HTTP connections that are currently active or idle on the +// client. +type ClientOpenConnections struct { + metric.Int64UpDownCounter +} + +var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), +} + +// NewClientOpenConnections returns a new ClientOpenConnections instrument. +func NewClientOpenConnections( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientOpenConnections, error) { + // Check if the meter is nil. + if m == nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newClientOpenConnectionsOpts + } else { + opt = append(opt, newClientOpenConnectionsOpts...) + } + + i, err := m.Int64UpDownCounter( + "http.client.open_connections", + opt..., + ) + if err != nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + } + return ClientOpenConnections{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientOpenConnections) Name() string { + return "http.client.open_connections" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOpenConnections) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientOpenConnections) Description() string { + return "Number of outbound HTTP connections that are currently active or idle on the client." +} + +// Add adds incr to the existing count for attrs. +// +// The connectionState is the state of the HTTP connection in the HTTP connection +// pool. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +func (m ClientOpenConnections) Add( + ctx context.Context, + incr int64, + connectionState ConnectionStateAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.connection.state", string(connectionState)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestBodySize is an instrument used to record metric values conforming +// to the "http.client.request.body.size" semantic conventions. It represents the +// size of HTTP client request bodies. 
+type ClientRequestBodySize struct { + metric.Int64Histogram +} + +var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), +} + +// NewClientRequestBodySize returns a new ClientRequestBodySize instrument. +func NewClientRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestBodySizeOpts + } else { + opt = append(opt, newClientRequestBodySizeOpts...) + } + + i, err := m.Int64Histogram( + "http.client.request.body.size", + opt..., + ) + if err != nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, err + } + return ClientRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestBodySize) Name() string { + return "http.client.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestBodySize) Description() string { + return "Size of HTTP client request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientRequestBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestDuration is an instrument used to record metric values conforming +// to the "http.client.request.duration" semantic conventions. It represents the +// duration of HTTP client requests. +type ClientRequestDuration struct { + metric.Float64Histogram +} + +var newClientRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), +} + +// NewClientRequestDuration returns a new ClientRequestDuration instrument. +func NewClientRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientRequestDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientRequestDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestDurationOpts + } else { + opt = append(opt, newClientRequestDurationOpts...) + } + + i, err := m.Float64Histogram( + "http.client.request.duration", + opt..., + ) + if err != nil { + return ClientRequestDuration{noop.Float64Histogram{}}, err + } + return ClientRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestDuration) Name() string { + return "http.client.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestDuration) Description() string { + return "Duration of HTTP client requests." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +func (m ClientRequestDuration) Record( + ctx context.Context, + val float64, + requestMethod RequestMethodAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. 
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// ClientResponseBodySize is an instrument used to record metric values +// conforming to the "http.client.response.body.size" semantic conventions. It +// represents the size of HTTP client response bodies. +type ClientResponseBodySize struct { + metric.Int64Histogram +} + +var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), +} + +// NewClientResponseBodySize returns a new ClientResponseBodySize instrument. +func NewClientResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientResponseBodySizeOpts + } else { + opt = append(opt, newClientResponseBodySizeOpts...) + } + + i, err := m.Int64Histogram( + "http.client.response.body.size", + opt..., + ) + if err != nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, err + } + return ClientResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseBodySize) Name() string { + return "http.client.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseBodySize) Description() string { + return "Size of HTTP client response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +// +// The size of the response payload body in bytes. 
This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientResponseBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. 
+func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ServerActiveRequests is an instrument used to record metric values conforming +// to the "http.server.active_requests" semantic conventions. It represents the +// number of active HTTP server requests. +type ServerActiveRequests struct { + metric.Int64UpDownCounter +} + +var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), +} + +// NewServerActiveRequests returns a new ServerActiveRequests instrument. +func NewServerActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ServerActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newServerActiveRequestsOpts + } else { + opt = append(opt, newServerActiveRequestsOpts...) + } + + i, err := m.Int64UpDownCounter( + "http.server.active_requests", + opt..., + ) + if err != nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ServerActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ServerActiveRequests) Name() string { + return "http.server.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ServerActiveRequests) Description() string { + return "Number of active HTTP server requests." +} + +// Add adds incr to the existing count for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (m ServerActiveRequests) Add( + ctx context.Context, + incr int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ServerRequestBodySize is an instrument used to record metric values conforming +// to the "http.server.request.body.size" semantic conventions. It represents the +// size of HTTP server request bodies. +type ServerRequestBodySize struct { + metric.Int64Histogram +} + +var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), +} + +// NewServerRequestBodySize returns a new ServerRequestBodySize instrument. +func NewServerRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestBodySizeOpts + } else { + opt = append(opt, newServerRequestBodySizeOpts...) + } + + i, err := m.Int64Histogram( + "http.server.request.body.size", + opt..., + ) + if err != nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, err + } + return ServerRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestBodySize) Name() string { + return "http.server.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestBodySize) Description() string { + return "Size of HTTP server request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerRequestBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. 
+func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerRequestDuration is an instrument used to record metric values conforming +// to the "http.server.request.duration" semantic conventions. It represents the +// duration of HTTP server requests. +type ServerRequestDuration struct { + metric.Float64Histogram +} + +var newServerRequestDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), +} + +// NewServerRequestDuration returns a new ServerRequestDuration instrument. +func NewServerRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestDurationOpts + } else { + opt = append(opt, newServerRequestDurationOpts...) + } + + i, err := m.Float64Histogram( + "http.server.request.duration", + opt..., + ) + if err != nil { + return ServerRequestDuration{noop.Float64Histogram{}}, err + } + return ServerRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestDuration) Name() string { + return "http.server.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestDuration) Description() string { + return "Duration of HTTP server requests." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (m ServerRequestDuration) Record( + ctx context.Context, + val float64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerResponseBodySize is an instrument used to record metric values +// conforming to the "http.server.response.body.size" semantic conventions. It +// represents the size of HTTP server response bodies. 
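The server-side instruments follow the same shape. A sketch of recording `http.server.request.duration` for one handled request, with the required method and scheme arguments plus the generated Attr helpers (the meter name, route, and status code are placeholders; in practice the instrument would be created once, not per request):

```go
package httpserver

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func recordRequest(ctx context.Context, start time.Time) error {
	// Histogram with the conventional name, description, and unit ("s").
	dur, err := httpconv.NewServerRequestDuration(otel.Meter("example/httpserver"))
	if err != nil {
		return err
	}

	// http.request.method and url.scheme are required; route and status code
	// are attached through the generated Attr helpers.
	dur.Record(ctx, time.Since(start).Seconds(),
		httpconv.RequestMethodGet, "http",
		dur.AttrRoute("/users/{id}"),
		dur.AttrResponseStatusCode(200),
	)
	return nil
}
```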
+type ServerResponseBodySize struct { + metric.Int64Histogram +} + +var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), +} + +// NewServerResponseBodySize returns a new ServerResponseBodySize instrument. +func NewServerResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerResponseBodySizeOpts + } else { + opt = append(opt, newServerResponseBodySizeOpts...) + } + + i, err := m.Int64Histogram( + "http.server.response.body.size", + opt..., + ) + if err != nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, err + } + return ServerResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponseBodySize) Name() string { + return "http.server.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseBodySize) Description() string { + return "Size of HTTP server response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. 
+func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go new file mode 100644 index 00000000..fd064530 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go @@ -0,0 +1,2264 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelconv provides types and functionality for OpenTelemetry semantic +// conventions in the "otel" namespace. +package otelconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ComponentTypeAttr is an attribute conforming to the otel.component.type +// semantic conventions. It represents a name identifying the type of the +// OpenTelemetry component. +type ComponentTypeAttr string + +var ( + // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span + // processor. + ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor" + // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor. + ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor" + // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record + // processor. + ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. 
+ ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. +type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. 
+ RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. + RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogExportedOpts + } else { + opt = append(opt, newSDKExporterLogExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + opt..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterLogInflightOpts + } else { + opt = append(opt, newSDKExporterLogInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + opt..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
+func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointExportedOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
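As an illustration of the up/down pattern, a sketch that brackets an export call with the SDKExporterLogInflight counter defined above, assuming a configured metric.Meter and a caller-supplied export function.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func exportWithInflight(ctx context.Context, n int64, export func(context.Context) error) error {
	meter := otel.Meter("exporter-observability")

	inflight, err := otelconv.NewSDKExporterLogInflight(meter)
	if err != nil {
		return err
	}

	set := attribute.NewSet(
		inflight.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
	)

	// Records go in flight when handed to the exporter and come back out when
	// the export call finishes, whether it succeeded or failed.
	inflight.AddSet(ctx, n, set)
	defer inflight.AddSet(ctx, -n, set)

	return export(ctx)
}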
+func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. 
It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterMetricDataPointInflightOpts + } else { + opt = append(opt, newSDKExporterMetricDataPointInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + opt..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterOperationDurationOpts + } else { + opt = append(opt, newSDKExporterOperationDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + opt..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrHTTPResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the HTTP status +// code of the last HTTP request performed in scope of this export call. +func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
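A minimal sketch of recording an export operation's duration with the SDKExporterOperationDuration histogram above, assuming a configured metric.Meter; the server address is illustrative and 4317 is simply the conventional OTLP/gRPC port.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func recordExport(ctx context.Context, elapsed time.Duration, exportErr error) error {
	meter := otel.Meter("exporter-observability")

	dur, err := otelconv.NewSDKExporterOperationDuration(meter)
	if err != nil {
		return err
	}

	attrs := []attribute.KeyValue{
		dur.AttrComponentType(otelconv.ComponentTypeOtlpGRPCSpanExporter),
		dur.AttrServerAddress("collector.example.com"), // illustrative endpoint
		dur.AttrServerPort(4317),
	}
	if exportErr != nil {
		// Per the instrument docs, error.type is only set for unsuccessful exports.
		attrs = append(attrs, dur.AttrErrorType(otelconv.ErrorTypeOther))
	}

	// The instrument unit is seconds, so convert the elapsed duration.
	dur.Record(ctx, elapsed.Seconds(), attrs...)
	return nil
}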
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanExportedOpts + } else { + opt = append(opt, newSDKExporterSpanExportedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + opt..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. 
It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{span}"), +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKExporterSpanInflightOpts + } else { + opt = append(opt, newSDKExporterSpanInflightOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + opt..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +var newSDKLogCreatedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers."), + metric.WithUnit("{log_record}"), +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKLogCreatedOpts + } else { + opt = append(opt, newSDKLogCreatedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + opt..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers." +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. +type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newSDKMetricReaderCollectionDurationOpts + } else { + opt = append(opt, newSDKMetricReaderCollectionDurationOpts...) + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + opt..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. 
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. +func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogProcessedOpts + } else { + opt = append(opt, newSDKProcessorLogProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + opt..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. 
+func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorLogQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."), + metric.WithUnit("{log_record}"), +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorLogQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorLogQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanProcessedOpts + } else { + opt = append(opt, newSDKProcessorSpanProcessedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + opt..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. 
For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. +func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueCapacityOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."), + metric.WithUnit("{span}"), +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKProcessorSpanQueueSizeOpts + } else { + opt = append(opt, newSDKProcessorSpanQueueSizeOpts...) + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + opt..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor." +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanLiveOpts + } else { + opt = append(opt, newSDKSpanLiveOpts...) + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + opt..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +var newSDKSpanStartedOpts = []metric.Int64CounterOption{ + metric.WithDescription("The number of created spans."), + metric.WithUnit("{span}"), +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + if len(opt) == 0 { + opt = newSDKSpanStartedOpts + } else { + opt = append(opt, newSDKSpanStartedOpts...) + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + opt..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. 
It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue { + return attribute.String("otel.span.parent.origin", string(val)) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go new file mode 100644 index 00000000..089b0c45 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv/metric.go @@ -0,0 +1,1010 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package rpcconv provides types and functionality for OpenTelemetry semantic +// conventions in the "rpc" namespace. +package rpcconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ClientDuration is an instrument used to record metric values conforming to the +// "rpc.client.duration" semantic conventions. It represents the measures the +// duration of outbound RPC. +type ClientDuration struct { + metric.Float64Histogram +} + +var newClientDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of outbound RPC."), + metric.WithUnit("ms"), +} + +// NewClientDuration returns a new ClientDuration instrument. +func NewClientDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientDurationOpts + } else { + opt = append(opt, newClientDurationOpts...) + } + + i, err := m.Float64Histogram( + "rpc.client.duration", + opt..., + ) + if err != nil { + return ClientDuration{noop.Float64Histogram{}}, err + } + return ClientDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientDuration) Name() string { + return "rpc.client.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ClientDuration) Description() string { + return "Measures the duration of outbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
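A minimal usage sketch, not part of the vendored file: every generated wrapper in these semconv packages follows the same construct-once, record-on-hot-path pattern, so one example may help orient the reader. The meter name, attribute keys, and recorded value below are illustrative assumptions; only the rpcconv constructor and the Record signature come from the generated code above.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv"
)

func main() {
	// Construct the wrapper once; a nil meter or a creation error still
	// yields a usable no-op instrument.
	dur, err := rpcconv.NewClientDuration(otel.Meter("example/rpc"))
	if err != nil {
		// dur is still safe to use; it wraps a noop histogram.
	}

	start := time.Now()
	// ... perform the outbound RPC ...
	dur.Record(
		context.Background(),
		float64(time.Since(start).Milliseconds()), // unit for rpc.client.duration is "ms"
		attribute.String("rpc.system", "grpc"),
		attribute.String("rpc.method", "SayHello"),
	)
}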
+func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestSize is an instrument used to record metric values conforming to +// the "rpc.client.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ClientRequestSize struct { + metric.Int64Histogram +} + +var newClientRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewClientRequestSize returns a new ClientRequestSize instrument. +func NewClientRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestSizeOpts + } else { + opt = append(opt, newClientRequestSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.request.size", + opt..., + ) + if err != nil { + return ClientRequestSize{noop.Int64Histogram{}}, err + } + return ClientRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestSize) Name() string { + return "rpc.client.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ClientRequestsPerRPC struct { + metric.Int64Histogram +} + +var newClientRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + +// NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument. +func NewClientRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientRequestsPerRPCOpts + } else { + opt = append(opt, newClientRequestsPerRPCOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.requests_per_rpc", + opt..., + ) + if err != nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ClientRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestsPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestsPerRPC) Name() string { + return "rpc.client.requests_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestsPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestsPerRPC) Description() string { + return "Measures the number of messages received per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponseSize is an instrument used to record metric values conforming to +// the "rpc.client.response.size" semantic conventions. It represents the +// measures the size of RPC response messages (uncompressed). 
+type ClientResponseSize struct { + metric.Int64Histogram +} + +var newClientResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewClientResponseSize returns a new ClientResponseSize instrument. +func NewClientResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientResponseSizeOpts + } else { + opt = append(opt, newClientResponseSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.client.response.size", + opt..., + ) + if err != nil { + return ClientResponseSize{noop.Int64Histogram{}}, err + } + return ClientResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseSize) Name() string { + return "rpc.client.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ClientResponsesPerRPC struct { + metric.Int64Histogram +} + +var newClientResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + +// NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument. +func NewClientResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newClientResponsesPerRPCOpts + } else { + opt = append(opt, newClientResponsesPerRPCOpts...) 
+ } + + i, err := m.Int64Histogram( + "rpc.client.responses_per_rpc", + opt..., + ) + if err != nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ClientResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponsesPerRPC) Name() string { + return "rpc.client.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerDuration is an instrument used to record metric values conforming to the +// "rpc.server.duration" semantic conventions. It represents the measures the +// duration of inbound RPC. +type ServerDuration struct { + metric.Float64Histogram +} + +var newServerDurationOpts = []metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms"), +} + +// NewServerDuration returns a new ServerDuration instrument. +func NewServerDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerDuration{noop.Float64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerDurationOpts + } else { + opt = append(opt, newServerDurationOpts...) + } + + i, err := m.Float64Histogram( + "rpc.server.duration", + opt..., + ) + if err != nil { + return ServerDuration{noop.Float64Histogram{}}, err + } + return ServerDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerDuration) Name() string { + return "rpc.server.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ServerDuration) Description() string { + return "Measures the duration of inbound RPC." 
+} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestSize is an instrument used to record metric values conforming to +// the "rpc.server.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ServerRequestSize struct { + metric.Int64Histogram +} + +var newServerRequestSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewServerRequestSize returns a new ServerRequestSize instrument. +func NewServerRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestSizeOpts + } else { + opt = append(opt, newServerRequestSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.request.size", + opt..., + ) + if err != nil { + return ServerRequestSize{noop.Int64Histogram{}}, err + } + return ServerRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestSize) Name() string { + return "rpc.server.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ServerRequestsPerRPC struct { + metric.Int64Histogram +} + +var newServerRequestsPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), +} + +// NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument. +func NewServerRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerRequestsPerRPCOpts + } else { + opt = append(opt, newServerRequestsPerRPCOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.requests_per_rpc", + opt..., + ) + if err != nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ServerRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestsPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestsPerRPC) Name() string { + return "rpc.server.requests_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestsPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestsPerRPC) Description() string { + return "Measures the number of messages received per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponseSize is an instrument used to record metric values conforming to +// the "rpc.server.response.size" semantic conventions. It represents the +// measures the size of RPC response messages (uncompressed). 
+type ServerResponseSize struct { + metric.Int64Histogram +} + +var newServerResponseSizeOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), +} + +// NewServerResponseSize returns a new ServerResponseSize instrument. +func NewServerResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseSize{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerResponseSizeOpts + } else { + opt = append(opt, newServerResponseSizeOpts...) + } + + i, err := m.Int64Histogram( + "rpc.server.response.size", + opt..., + ) + if err != nil { + return ServerResponseSize{noop.Int64Histogram{}}, err + } + return ServerResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponseSize) Name() string { + return "rpc.server.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ServerResponsesPerRPC struct { + metric.Int64Histogram +} + +var newServerResponsesPerRPCOpts = []metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), +} + +// NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument. +func NewServerResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + if len(opt) == 0 { + opt = newServerResponsesPerRPCOpts + } else { + opt = append(opt, newServerResponsesPerRPCOpts...) 
+ } + + i, err := m.Int64Histogram( + "rpc.server.responses_per_rpc", + opt..., + ) + if err != nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ServerResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponsesPerRPC) Name() string { + return "rpc.server.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index aea11a2b..d9ecef1c 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -4,6 +4,7 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( + "slices" "time" "go.opentelemetry.io/otel/attribute" @@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption { }) } -// WithInstrumentationAttributes sets the instrumentation attributes. +// mergeSets returns the union of keys between a and b. Any duplicate keys will +// use the value associated with b. +func mergeSets(a, b attribute.Set) attribute.Set { + // NewMergeIterator uses the first value for any duplicates. + iter := attribute.NewMergeIterator(&b, &a) + merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for iter.Next() { + merged = append(merged, iter.Attribute()) + } + return attribute.NewSet(merged...) +} + +// WithInstrumentationAttributes adds the instrumentation attributes. // -// The passed attributes will be de-duplicated. +// This is equivalent to calling [WithInstrumentationAttributeSet] with an +// [attribute.Set] created from a clone of the passed attributes. +// [WithInstrumentationAttributeSet] is recommended for more control. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. 
Attributes with duplicate keys will use the last value passed. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + set := attribute.NewSet(slices.Clone(attr)...) + return WithInstrumentationAttributeSet(set) +} + +// WithInstrumentationAttributeSet adds the instrumentation attributes. +// +// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet] +// options are passed, the attributes will be merged together in the order +// they are passed. Attributes with duplicate keys will use the last value passed. +func WithInstrumentationAttributeSet(set attribute.Set) TracerOption { + if set.Len() == 0 { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + return config + }) + } + return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) + if config.attrs.Len() == 0 { + config.attrs = set + } else { + config.attrs = mergeSets(config.attrs, set) + } return config }) } diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go index d3aa476e..d01e7936 100644 --- a/vendor/go.opentelemetry.io/otel/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -66,6 +66,10 @@ type Span interface { // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. + // + // Note that adding attributes at span creation using [WithAttributes] is preferred + // to calling SetAttribute later, as samplers can only consider information + // already present during span creation. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index bcaa5aa5..0d5b0291 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
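A minimal sketch, not part of the diff, of how the new merging behavior for instrumentation attributes reads from the caller's side; the tracer scope name and the "tier" attribute are illustrative assumptions.

package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Attributes from multiple options are merged in the order passed; for
	// duplicate keys the last value wins, so "tier" resolves to "prod" here.
	tr := otel.Tracer(
		"example/instrumentation",
		trace.WithInstrumentationAttributes(attribute.String("tier", "dev")),
		trace.WithInstrumentationAttributeSet(
			attribute.NewSet(attribute.String("tier", "prod")),
		),
	)
	_ = tr
}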
func Version() string { - return "1.38.0" + return "1.39.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 07145e25..f4a3893e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.38.0 + version: v1.39.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.60.0 + version: v0.61.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.14.0 + version: v0.15.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,9 +36,28 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.13 + version: v0.0.14 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - go.opentelemetry.io/otel/trace/internal/telemetry/test +modules: + go.opentelemetry.io/otel/exporters/stdout/stdouttrace: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/prometheus: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp: + version-refs: + - ./internal/version.go + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp: + version-refs: + - ./internal/version.go diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 93bcaab0..9a4bd123 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. 
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. +func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { @@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1161,7 +1189,7 @@ var defaultRFC9218Priority = PriorityParam{ // PriorityParam struct below is a superset of both schemes. The exported // symbols are from RFC 7540 and the non-exported ones are from RFC 9218. -// PriorityParam are the stream prioritzation parameters. 
+// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index be759b60..1965913e 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -3076,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. 
+func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4d3890f9..7de27be5 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -185,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. 
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -232,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 6d24d6a1..4e33c29a 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -214,8 +214,8 @@ func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -302,7 +302,6 @@ func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go similarity index 99% rename from vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go index 9b5b8808..cb4cadc3 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -39,7 +39,7 @@ type priorityWriteSchedulerRFC9218 struct { prioritizeIncremental bool } -func newPriorityWriteSchedulerRFC9128() WriteScheduler { +func newPriorityWriteSchedulerRFC9218() WriteScheduler { ws := &priorityWriteSchedulerRFC9218{ streams: make(map[uint32]streamMetadata), } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index de34feb8..3e3b6306 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -9,7 +9,6 @@ package oauth2 // import "golang.org/x/oauth2" import ( - "bytes" "context" "errors" "net/http" @@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(c.Endpoint.AuthURL) v := url.Values{ "response_type": {"code"}, diff --git 
a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 1d8cffae..2f45dbc8 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d1c8b264..fd39be4e 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -255,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -529,6 +531,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || @@ -611,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 9439af96..06c0eea6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b6db27d9..120a7b35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -853,20 +853,86 @@ const ( DM_VERSION_MAJOR = 0x4 DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + 
DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -1152,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -1539,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c @@ -2276,7 +2354,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + 
NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2463,6 +2701,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2758,6 +3049,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 
0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -3091,6 +3399,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3317,6 +3666,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3553,6 +3912,8 @@ const ( UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 1c37f9fb..97a61fc5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34a..a0d6d498 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c1..dd9c903f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba..384c61ca 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 
+120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c..6384c983 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3..553c1c6f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a6..b3339f20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a303..177091d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c8..c5abf156 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb..f1f3fadf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce240..203ad9c5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb..4b9abcb2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e952..f8798303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7..64347eb3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e..7d719117 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb..8935d10a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 944e75a1..c1a46701 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3590,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type 
NexthopGrp struct { Id uint32 Weight uint8 @@ -3597,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -6332,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec..50e8e644 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index bd513373..69439df2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -916,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. 
+type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 358be3c7..6e4f50eb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2320,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_PROTOCOL enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. +type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 426151a0..f25b7308 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { @@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { return } +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { @@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa return } +func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, 
notificationHandle *Handle) (errcode error) { var _p0 uint32 if initialNotification { diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index bddb2e2a..9255449b 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -413,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -438,7 +438,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -478,7 +478,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 1de0ce66..2079de7b 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -33,17 +33,21 @@ guidelines, there may be valid reasons to do so, but it should be rare. ## Guidelines for Pull Requests -How to get your contributions merged smoothly and quickly: +Please read the following carefully to ensure your contributions can be merged +smoothly and quickly. + +### PR Contents - Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that attempt to fix several things at the same time, and if one part of the PR has a problem, that will hold up the entire PR. -- For **speculative changes**, consider opening an issue and discussing it - first. If you are suggesting a behavioral or API change, consider starting - with a [gRFC proposal](https://github.com/grpc/proposal). Many new features - that are not bug fixes will require cross-language agreement. +- If your change does not address an **open issue** with an **agreed + resolution**, consider opening an issue and discussing it first. If you are + suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). Many new features that are not + bug fixes will require cross-language agreement. - If you want to fix **formatting or style**, consider whether your changes are an obvious improvement or might be considered a personal preference. If a @@ -56,16 +60,6 @@ How to get your contributions merged smoothly and quickly: often written as "iff". Please do not make spelling correction changes unless you are certain they are misspellings. -- Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a GitHub issue if it exists. - -- Maintain a **clean commit history** and use **meaningful commit messages**. - PRs with messy commit histories are difficult to review and won't be merged. 
- Before sending your PR, ensure your changes are based on top of the latest `upstream/master` commits, and avoid rebasing in the middle of a code review. You should **never use `git push -f`** unless absolutely necessary during a review, as it can interfere with GitHub's tracking of comments. - - **All tests need to be passing** before your change can be merged. We recommend you run tests locally before creating your PR to catch breakages early on: @@ -81,15 +75,80 @@ How to get your contributions merged smoothly and quickly: GitHub, which will trigger a GitHub Actions run that you can use to verify everything is passing. -- If you are adding a new file, make sure it has the **copyright message** +- Note that there are two GitHub actions checks that need not be green: + + 1. We test the freshness of the generated proto code we maintain via the + `vet-proto` check. If the source proto files are updated, but our repo is + not updated, an optional checker will fail. This will be fixed by our team + in a separate PR and will not prevent the merge of your PR. + + 2. We run a checker that will fail if there is any change in dependencies of + an exported package via the `dependencies` check. If new dependencies are + added that are not appropriate, we may not accept your PR (see below). + +- If you are adding a **new file**, make sure it has the **copyright message** template at the top as a comment. You can copy the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number of exceptions. **If your contribution introduces new dependencies**, you will - need a discussion with gRPC-Go maintainers. A GitHub action check will run on - every PR, and will flag any transitive dependency changes from any public - package. + need a discussion with gRPC-Go maintainers. + +### PR Descriptions + +- **PR titles** should start with the name of the component being addressed, or + the type of change. Examples: transport, client, server, round_robin, xds, + cleanup, deps. + +- Read and follow the **guidelines for PR titles and descriptions** here: + https://google.github.io/eng-practices/review/developer/cl-descriptions.html + + *particularly* the sections "First Line" and "Body is Informative". + + Note: your PR description will be used as the git commit message in a + squash-and-merge if your PR is approved. We may make changes to this as + necessary. + +- **Does this PR relate to an open issue?** On the first line, please use the + tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or + use `Updates #<issue>` if the PR is related to an open issue, but does not fix + it. Consider filing an issue if one does not already exist. + +- PR descriptions *must* conclude with **release notes** as follows: + + ``` + RELEASE NOTES: + * <component>: <summary> + ``` + + This need not match the PR title. + + The summary must: + + * be something that gRPC users will understand. + + * clearly explain the feature being added, the issue being fixed, or the + behavior being changed, etc. If fixing a bug, be clear about how the bug + can be triggered by an end-user. + + * begin with a capital letter and use complete sentences. + + * be as short as possible to describe the change being made. + + If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or + GitHub-related, use `RELEASE NOTES: n/a`. + +### PR Process + +- Please **self-review** your code changes before sending your PR. This will + prevent simple, obvious errors from causing delays.
+ +- Maintain a **clean commit history** and use **meaningful commit messages**. + PRs with messy commit histories are difficult to review and won't be merged. + Before sending your PR, ensure your changes are based on top of the latest + `upstream/master` commits, and avoid rebasing in the middle of a code review. + You should **never use `git push -f`** unless absolutely necessary during a + review, as it can interfere with GitHub's tracking of comments. - Unless your PR is trivial, you should **expect reviewer comments** that you will need to address before merging. We'll label the PR as `Status: Requires @@ -98,5 +157,3 @@ How to get your contributions merged smoothly and quickly: `stale`, and we will automatically close it after 7 days if we don't hear back from you. Please feel free to ping issues or bugs if you do not get a response within a week. - -- Exceptions to the rules can be made if there's a compelling reason to do so. diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index ea889981..b4bc3a2b 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,55 +16,124 @@ * */ -// Package pickfirst contains the pick_first load balancing policy. +// Package pickfirst contains the pick_first load balancing policy which +// is the universal leaf policy. package pickfirst import ( "encoding/json" "errors" "fmt" - rand "math/rand/v2" + "net" + "net/netip" + "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - - _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { - if envconfig.NewPickFirstEnabled { - return - } balancer.Register(pickfirstBuilder{}) } -var logger = grpclog.Component("pick-first-lb") +// Name is the name of the pick_first balancer. +const Name = "pick_first" + +// enableHealthListenerKeyType is a unique key type used in resolver +// attributes to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "{disconnection}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. 
Number of failed connection attempts.", + Unit: "{attempt}", + Labels: []string{"grpc.target"}, + Default: false, + }) +) const ( - // Name is the name of the pick_first balancer. - Name = "pick_first" - logPrefix = "[pick-first-lb %p] " + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 ) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{cc: cc} +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: cc.MetricsRecorder(), + + subConns: resolver.NewAddressMapV2[*scData](), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, + } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (pickfirstBuilder) Name() string { +func (b pickfirstBuilder) Name() string { return Name } +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -74,90 +143,129 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. 
+ effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } - return cfg, nil + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil } type pickfirstBalancer struct { - logger *internalgrpclog.PrefixLogger - state connectivity.State - cc balancer.ClientConn - subConn balancer.SubConn + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMapV2[*scData] + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { if b.logger.V(2) { b.logger.Infof("Received error from the name resolver: %v", err) } - if b.subConn == nil { - b.state = connectivity.TransientFailure - } - if b.state != connectivity.TransientFailure { - // The picker will not change since the balancer does not currently - // report an error. + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. + if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } return } - b.cc.UpdateState(balancer.State{ + + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) } -// Shuffler is an interface for shuffling an address list. -type Shuffler interface { - ShuffleAddressListForTesting(n int, swap func(i, j int)) -} - -// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n -// is the number of elements. swap swaps the elements with indexes i and j. -func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } - func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // The resolver reported an empty address list. Treat it like an error by - // calling b.ResolverError. - if b.subConn != nil { - // Shut down the old subConn. 
All addresses were removed, so it is - // no longer valid. - b.subConn.Shutdown() - b.subConn = nil - } - b.ResolverError(errors.New("produced zero addresses")) + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - // We don't have to guard this block with the env var because ParseConfig - // already does so. + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) } if b.logger.V(2) { b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) } - var addrs []resolver.Address + var newAddrs []resolver.Address if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling will - // change the order of endpoints but not touch the order of the addresses - // within each endpoint. - A61 + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } - // "Flatten the list by concatenating the ordered list of addresses for each - // of the endpoints, in order." - A61 + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8304 section 4." - A61 - // TODO: support the above language. - addrs = append(addrs, endpoint.Addresses...) + newAddrs = append(newAddrs, endpoint.Addresses...) } } else { // Endpoints not set, process addresses until we migrate resolver @@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. - addrs = state.ResolverState.Addresses + newAddrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { - addrs = append([]resolver.Address{}, addrs...) - rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] }) } } - if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, addrs) + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. 
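As a standalone illustration of the optional gRFC A62 shuffle-and-flatten step above (this sketch is not part of the patch and uses math/rand/v2 directly rather than the balancer's internal shuffle helper):

```go
package main

import (
	"fmt"
	rand "math/rand/v2"

	"google.golang.org/grpc/resolver"
)

func main() {
	endpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
		{Addresses: []resolver.Address{{Addr: "10.0.1.1:443"}}},
	}

	// Shuffle the endpoints (not the addresses inside them), as described in A62.
	shuffled := append([]resolver.Endpoint{}, endpoints...)
	rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })

	// Flatten: concatenate each endpoint's ordered addresses, in order.
	var flat []resolver.Address
	for _, ep := range shuffled {
		flat = append(flat, ep.Addresses...)
	}
	for _, a := range flat {
		fmt.Println(a.Addr)
	}
}
```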
+ // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } - var subConn balancer.SubConn - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(subConn, state) - }, - }) - if err != nil { - if b.logger.V(2) { - b.logger.Infof("Failed to create new SubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - return balancer.ErrBadResolverState + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() } - b.subConn = subConn - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.subConn.Connect() return nil } @@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) } -func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if b.logger.V(2) { - b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + // Move the balancer into CONNECTING state immediately. This is done to + // avoid staying in IDLE if a resolver update arrives before the first + // SubConn reports CONNECTING. 
+ b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.subConn.Shutdown() + } + b.subConns = resolver.NewAddressMapV2[*scData]() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMapV2[bool]() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + seenAddrs.Set(addr, true) + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. 
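To make the family classification and interleaving above concrete, a small standalone sketch (not part of the patch) that reproduces the family check with the standard library and notes the ordering RFC 8305 interleaving is expected to produce:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family mirrors the idea of addressFamily: strip the port, then try to parse
// the host as an IP; anything unparsable (e.g. a hostname) is "unknown".
func family(hostport string) string {
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	switch {
	case err != nil:
		return "unknown"
	case ip.Is4() || ip.Is4In6():
		return "v4"
	case ip.Is6():
		return "v6"
	default:
		return "unknown"
	}
}

func main() {
	for _, a := range []string{"[2001:db8::1]:443", "10.0.0.1:443", "example.com:443"} {
		fmt.Printf("%-20s -> %s\n", a, family(a)) // v6, v4, unknown
	}
	// For an input ordered [v6a, v6b, v4a, v4b], interleaving alternates
	// families starting with the first one seen: [v6a, v4a, v6b, v4b].
}
```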
+ host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + newAddrsMap := resolver.NewAddressMapV2[bool]() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + for _, oldAddr := range b.subConns.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. +func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, sd := range b.subConns.Values() { + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMapV2[*scData]() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + switch sd.rawConnectivityState { + case connectivity.Idle: + sd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. 
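A minimal sketch of the 250ms Happy Eyeballs pacing used in this pass (standalone and simplified: plain time.AfterFunc stands in for the balancer's internal timer helper, and a print stands in for starting the next connection attempt):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	cancelled := false

	fmt.Println("connecting to addr[0]")
	// Arm a fallback: if addr[0] has not become READY within 250ms, start
	// an attempt on the next address as well.
	timer := time.AfterFunc(250*time.Millisecond, func() {
		mu.Lock()
		defer mu.Unlock()
		if cancelled {
			return
		}
		fmt.Println("timer fired: connecting to addr[1]")
	})

	// The READY notification for addr[0] would call this to cancel the
	// fallback; sync.OnceFunc keeps the cancellation idempotent.
	cancelFallback := sync.OnceFunc(func() {
		mu.Lock()
		cancelled = true
		mu.Unlock()
		timer.Stop()
	})
	_ = cancelFallback // invoke from the READY state callback

	time.Sleep(300 * time.Millisecond)
}
```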
+ b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return } - if b.subConn != subConn { + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } if b.logger.V(2) { - b.logger.Infof("Ignored state change because subConn is not recognized") + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if !b.isActiveSCData(sd) { return } - if state.ConnectivityState == connectivity.Shutdown { - b.subConn = nil + if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } - switch state.ConnectivityState { - case connectivity.Ready: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, - }) - case connectivity.Connecting: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. See A62. + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. 
Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. We treat + // this as a successful connection and transition to IDLE. + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. 
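Since the health-listener path registered above is opt-in via a resolver attribute, a hedged usage sketch of the exported hook (the surrounding parent-policy package and helper name are hypothetical):

```go
package parentpolicy // hypothetical parent LB policy package

import (
	"google.golang.org/grpc/balancer/pickfirst"
	"google.golang.org/grpc/resolver"
)

// forwardToLeaf tags the resolver state so that the leaf pick_first policy
// registers a health listener once a SubConn reports READY.
func forwardToLeaf(state resolver.State) resolver.State {
	return pickfirst.EnableHealthListener(state)
}
```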
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. case connectivity.Idle: - if b.state == connectivity.TransientFailure { - // We stay in TransientFailure until we are Ready. Also kick the - // subConn out of Idle into Connecting. See A62. - b.subConn.Connect() + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, sd := range b.subConns.Values() { + if !sd.connectionFailedInFirstPass { return } - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &idlePicker{subConn: subConn}, + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, sd := range b.subConns.Values() { + if sd.rawConnectivityState == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. 
+ if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, }) case connectivity.TransientFailure: - b.cc.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &picker{err: state.ConnectionError}, + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) } - b.state = state.ConnectivityState } -func (b *pickfirstBalancer) Close() { +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. + if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) } -func (b *pickfirstBalancer) ExitIdle() { - if b.subConn != nil && b.state == connectivity.Idle { - b.subConn.Connect() - } +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) } type picker struct { @@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - subConn balancer.SubConn + exitIdle func() } func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.subConn.Connect() + i.exitIdle() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. 
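A standalone sketch (not part of the patch) of the idle-picker contract shown above: every Pick reports ErrNoSubConnAvailable so the RPC waits for a fresh picker, while sync.OnceFunc guarantees the exit-from-idle callback runs only once even if many RPCs race on Pick:

```go
package main

import (
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
)

type idlePicker struct{ exitIdle func() }

func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	i.exitIdle()
	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}

func main() {
	p := &idlePicker{exitIdle: sync.OnceFunc(func() { fmt.Println("ExitIdle triggered") })}
	for i := 0; i < 3; i++ {
		_, err := p.Pick(balancer.PickInfo{})
		fmt.Println(err) // always ErrNoSubConnAvailable; the print above fires only once
	}
}
```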
+func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go deleted file mode 100644 index 67f315a0..00000000 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ /dev/null @@ -1,906 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package pickfirstleaf contains the pick_first load balancing policy which -// will be the universal leaf policy after dualstack changes are implemented. -// -// # Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. -package pickfirstleaf - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/netip" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/pickfirst/internal" - "google.golang.org/grpc/connectivity" - expstats "google.golang.org/grpc/experimental/stats" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/envconfig" - internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -func init() { - if envconfig.NewPickFirstEnabled { - // Register as the default pick_first balancer. - Name = "pick_first" - } - balancer.Register(pickfirstBuilder{}) -} - -// enableHealthListenerKeyType is a unique key type used in resolver -// attributes to indicate whether the health listener usage is enabled. 
-type enableHealthListenerKeyType struct{} - -var ( - logger = grpclog.Component("pick-first-leaf-lb") - // Name is the name of the pick_first_leaf balancer. - // It is changed to "pick_first" in init() if this balancer is to be - // registered as the default pickfirst. - Name = "pick_first_leaf" - disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.disconnections", - Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", - Unit: "{disconnection}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_succeeded", - Description: "EXPERIMENTAL. Number of successful connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) - connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ - Name: "grpc.lb.pick_first.connection_attempts_failed", - Description: "EXPERIMENTAL. Number of failed connection attempts.", - Unit: "{attempt}", - Labels: []string{"grpc.target"}, - Default: false, - }) -) - -const ( - // TODO: change to pick-first when this becomes the default pick_first policy. - logPrefix = "[pick-first-leaf-lb %p] " - // connectionDelayInterval is the time to wait for during the happy eyeballs - // pass before starting the next connection attempt. - connectionDelayInterval = 250 * time.Millisecond -) - -type ipAddrFamily int - -const ( - // ipAddrFamilyUnknown represents strings that can't be parsed as an IP - // address. - ipAddrFamilyUnknown ipAddrFamily = iota - ipAddrFamilyV4 - ipAddrFamilyV6 -) - -type pickfirstBuilder struct{} - -func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { - b := &pickfirstBalancer{ - cc: cc, - target: bo.Target.String(), - metricsRecorder: cc.MetricsRecorder(), - - subConns: resolver.NewAddressMapV2[*scData](), - state: connectivity.Connecting, - cancelConnectionTimer: func() {}, - } - b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) - return b -} - -func (b pickfirstBuilder) Name() string { - return Name -} - -func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg pfConfig - if err := json.Unmarshal(js, &cfg); err != nil { - return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) - } - return cfg, nil -} - -// EnableHealthListener updates the state to configure pickfirst for using a -// generic health listener. -func EnableHealthListener(state resolver.State) resolver.State { - state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) - return state -} - -type pfConfig struct { - serviceconfig.LoadBalancingConfig `json:"-"` - - // If set to true, instructs the LB policy to shuffle the order of the list - // of endpoints received from the name resolver before attempting to - // connect to them. - ShuffleAddressList bool `json:"shuffleAddressList"` -} - -// scData keeps track of the current state of the subConn. -// It is not safe for concurrent access. -type scData struct { - // The following fields are initialized at build time and read-only after - // that. 
- subConn balancer.SubConn - addr resolver.Address - - rawConnectivityState connectivity.State - // The effective connectivity state based on raw connectivity, health state - // and after following sticky TransientFailure behaviour defined in A62. - effectiveState connectivity.State - lastErr error - connectionFailedInFirstPass bool -} - -func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { - sd := &scData{ - rawConnectivityState: connectivity.Idle, - effectiveState: connectivity.Idle, - addr: addr, - } - sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ - StateListener: func(state balancer.SubConnState) { - b.updateSubConnState(sd, state) - }, - }) - if err != nil { - return nil, err - } - sd.subConn = sc - return sd, nil -} - -type pickfirstBalancer struct { - // The following fields are initialized at build time and read-only after - // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn - target string - metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil - - // The mutex is used to ensure synchronization of updates triggered - // from the idle picker and the already serialized resolver, - // SubConn state updates. - mu sync.Mutex - // State reported to the channel based on SubConn states and resolver - // updates. - state connectivity.State - // scData for active subonns mapped by address. - subConns *resolver.AddressMapV2[*scData] - addressList addressList - firstPass bool - numTF int - cancelConnectionTimer func() - healthCheckingEnabled bool -} - -// ResolverError is called by the ClientConn when the name resolver produces -// an error or when pickfirst determined the resolver update to be invalid. -func (b *pickfirstBalancer) ResolverError(err error) { - b.mu.Lock() - defer b.mu.Unlock() - b.resolverErrorLocked(err) -} - -func (b *pickfirstBalancer) resolverErrorLocked(err error) { - if b.logger.V(2) { - b.logger.Infof("Received error from the name resolver: %v", err) - } - - // The picker will not change since the balancer does not currently - // report an error. If the balancer hasn't received a single good resolver - // update yet, transition to TRANSIENT_FAILURE. - if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { - if b.logger.V(2) { - b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") - } - return - } - - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) -} - -func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - b.mu.Lock() - defer b.mu.Unlock() - b.cancelConnectionTimer() - if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { - // Cleanup state pertaining to the previous resolver state. - // Treat an empty address list like an error by calling b.ResolverError. 
- b.closeSubConnsLocked() - b.addressList.updateAddrs(nil) - b.resolverErrorLocked(errors.New("produced zero addresses")) - return balancer.ErrBadResolverState - } - b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil - cfg, ok := state.BalancerConfig.(pfConfig) - if state.BalancerConfig != nil && !ok { - return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) - } - - if b.logger.V(2) { - b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) - } - - var newAddrs []resolver.Address - if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { - // Perform the optional shuffling described in gRFC A62. The shuffling - // will change the order of endpoints but not touch the order of the - // addresses within each endpoint. - A61 - if cfg.ShuffleAddressList { - endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - - // "Flatten the list by concatenating the ordered list of addresses for - // each of the endpoints, in order." - A61 - for _, endpoint := range endpoints { - newAddrs = append(newAddrs, endpoint.Addresses...) - } - } else { - // Endpoints not set, process addresses until we migrate resolver - // emissions fully to Endpoints. The top channel does wrap emitted - // addresses with endpoints, however some balancers such as weighted - // target do not forward the corresponding correct endpoints down/split - // endpoints properly. Once all balancers correctly forward endpoints - // down, can delete this else conditional. - newAddrs = state.ResolverState.Addresses - if cfg.ShuffleAddressList { - newAddrs = append([]resolver.Address{}, newAddrs...) - internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) - } - } - - // If an address appears in multiple endpoints or in the same endpoint - // multiple times, we keep it only once. We will create only one SubConn - // for the address because an AddressMap is used to store SubConns. - // Not de-duplicating would result in attempting to connect to the same - // SubConn multiple times in the same pass. We don't want this. - newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - - prevAddr := b.addressList.currentAddress() - prevSCData, found := b.subConns.Get(prevAddr) - prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready - b.addressList.updateAddrs(newAddrs) - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. - if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { - return nil - } - - b.reconcileSubConnsLocked(newAddrs) - // If it's the first resolver update or the balancer was already READY - // (but the new address list does not contain the ready SubConn) or - // CONNECTING, enter CONNECTING. - // We may be in TRANSIENT_FAILURE due to a previous empty address list, - // we should still enter CONNECTING because the sticky TF behaviour - // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported - // due to connectivity failures. 
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { - // Start connection attempt at first address. - b.forceUpdateConcludedStateLocked(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - b.startFirstPassLocked() - } else if b.state == connectivity.TransientFailure { - // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until - // we're READY. See A62. - b.startFirstPassLocked() - } - return nil -} - -// UpdateSubConnState is unused as a StateListener is always registered when -// creating SubConns. -func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) -} - -func (b *pickfirstBalancer) Close() { - b.mu.Lock() - defer b.mu.Unlock() - b.closeSubConnsLocked() - b.cancelConnectionTimer() - b.state = connectivity.Shutdown -} - -// ExitIdle moves the balancer out of idle state. It can be called concurrently -// by the idlePicker and clientConn so access to variables should be -// synchronized. -func (b *pickfirstBalancer) ExitIdle() { - b.mu.Lock() - defer b.mu.Unlock() - if b.state == connectivity.Idle { - b.startFirstPassLocked() - } -} - -func (b *pickfirstBalancer) startFirstPassLocked() { - b.firstPass = true - b.numTF = 0 - // Reset the connection attempt record for existing SubConns. - for _, sd := range b.subConns.Values() { - sd.connectionFailedInFirstPass = false - } - b.requestConnectionLocked() -} - -func (b *pickfirstBalancer) closeSubConnsLocked() { - for _, sd := range b.subConns.Values() { - sd.subConn.Shutdown() - } - b.subConns = resolver.NewAddressMapV2[*scData]() -} - -// deDupAddresses ensures that each address appears only once in the slice. -func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMapV2[*scData]() - retAddrs := []resolver.Address{} - - for _, addr := range addrs { - if _, ok := seenAddrs.Get(addr); ok { - continue - } - retAddrs = append(retAddrs, addr) - } - return retAddrs -} - -// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) -// as per RFC-8305 section 4. -// Whichever address family is first in the list is followed by an address of -// the other address family; that is, if the first address in the list is IPv6, -// then the first IPv4 address should be moved up in the list to be second in -// the list. It doesn't support configuring "First Address Family Count", i.e. -// there will always be a single member of the first address family at the -// beginning of the interleaved list. -// Addresses that are neither IPv4 nor IPv6 are treated as part of a third -// "unknown" family for interleaving. 
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 -func interleaveAddresses(addrs []resolver.Address) []resolver.Address { - familyAddrsMap := map[ipAddrFamily][]resolver.Address{} - interleavingOrder := []ipAddrFamily{} - for _, addr := range addrs { - family := addressFamily(addr.Addr) - if _, found := familyAddrsMap[family]; !found { - interleavingOrder = append(interleavingOrder, family) - } - familyAddrsMap[family] = append(familyAddrsMap[family], addr) - } - - interleavedAddrs := make([]resolver.Address, 0, len(addrs)) - - for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { - // Some IP types may have fewer addresses than others, so we look for - // the next type that has a remaining member to add to the interleaved - // list. - family := interleavingOrder[curFamilyIdx] - remainingMembers := familyAddrsMap[family] - if len(remainingMembers) > 0 { - interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) - familyAddrsMap[family] = remainingMembers[1:] - } - } - - return interleavedAddrs -} - -// addressFamily returns the ipAddrFamily after parsing the address string. -// If the address isn't of the format "ip-address:port", it returns -// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when -// using a resolver like passthrough where the address may be a hostname in -// some format that the dialer can resolve. -func addressFamily(address string) ipAddrFamily { - // Parse the IP after removing the port. - host, _, err := net.SplitHostPort(address) - if err != nil { - return ipAddrFamilyUnknown - } - ip, err := netip.ParseAddr(host) - if err != nil { - return ipAddrFamilyUnknown - } - switch { - case ip.Is4() || ip.Is4In6(): - return ipAddrFamilyV4 - case ip.Is6(): - return ipAddrFamilyV6 - default: - return ipAddrFamilyUnknown - } -} - -// reconcileSubConnsLocked updates the active subchannels based on a new address -// list from the resolver. It does this by: -// - closing subchannels: any existing subchannels associated with addresses -// that are no longer in the updated list are shut down. -// - removing subchannels: entries for these closed subchannels are removed -// from the subchannel map. -// -// This ensures that the subchannel map accurately reflects the current set of -// addresses received from the name resolver. -func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMapV2[bool]() - for _, addr := range newAddrs { - newAddrsMap.Set(addr, true) - } - - for _, oldAddr := range b.subConns.Keys() { - if _, ok := newAddrsMap.Get(oldAddr); ok { - continue - } - val, _ := b.subConns.Get(oldAddr) - val.subConn.Shutdown() - b.subConns.Delete(oldAddr) - } -} - -// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn -// becomes ready, which means that all other subConn must be shutdown. -func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { - b.cancelConnectionTimer() - for _, sd := range b.subConns.Values() { - if sd.subConn != selected.subConn { - sd.subConn.Shutdown() - } - } - b.subConns = resolver.NewAddressMapV2[*scData]() - b.subConns.Set(selected.addr, selected) -} - -// requestConnectionLocked starts connecting on the subchannel corresponding to -// the current address. If no subchannel exists, one is created. If the current -// subchannel is in TransientFailure, a connection to the next address is -// attempted until a subchannel is found. 
-func (b *pickfirstBalancer) requestConnectionLocked() { - if !b.addressList.isValid() { - return - } - var lastErr error - for valid := true; valid; valid = b.addressList.increment() { - curAddr := b.addressList.currentAddress() - sd, ok := b.subConns.Get(curAddr) - if !ok { - var err error - // We want to assign the new scData to sd from the outer scope, - // hence we can't use := below. - sd, err = b.newSCData(curAddr) - if err != nil { - // This should never happen, unless the clientConn is being shut - // down. - if b.logger.V(2) { - b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) - } - // Do nothing, the LB policy will be closed soon. - return - } - b.subConns.Set(curAddr, sd) - } - - switch sd.rawConnectivityState { - case connectivity.Idle: - sd.subConn.Connect() - b.scheduleNextConnectionLocked() - return - case connectivity.TransientFailure: - // The SubConn is being re-used and failed during a previous pass - // over the addressList. It has not completed backoff yet. - // Mark it as having failed and try the next address. - sd.connectionFailedInFirstPass = true - lastErr = sd.lastErr - continue - case connectivity.Connecting: - // Wait for the connection attempt to complete or the timer to fire - // before attempting the next address. - b.scheduleNextConnectionLocked() - return - default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) - return - - } - } - - // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass if possible. - b.endFirstPassIfPossibleLocked(lastErr) -} - -func (b *pickfirstBalancer) scheduleNextConnectionLocked() { - b.cancelConnectionTimer() - if !b.addressList.hasNext() { - return - } - curAddr := b.addressList.currentAddress() - cancelled := false // Access to this is protected by the balancer's mutex. - closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { - b.mu.Lock() - defer b.mu.Unlock() - // If the scheduled task is cancelled while acquiring the mutex, return. - if cancelled { - return - } - if b.logger.V(2) { - b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) - } - if b.addressList.increment() { - b.requestConnectionLocked() - } - }) - // Access to the cancellation callback held by the balancer is guarded by - // the balancer's mutex, so it's safe to set the boolean from the callback. - b.cancelConnectionTimer = sync.OnceFunc(func() { - cancelled = true - closeFn() - }) -} - -func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - oldState := sd.rawConnectivityState - sd.rawConnectivityState = newState.ConnectivityState - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes this - // SubConn. - if !b.isActiveSCData(sd) { - return - } - if newState.ConnectivityState == connectivity.Shutdown { - sd.effectiveState = connectivity.Shutdown - return - } - - // Record a connection attempt when exiting CONNECTING. 
- if newState.ConnectivityState == connectivity.TransientFailure { - sd.connectionFailedInFirstPass = true - connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) - } - - if newState.ConnectivityState == connectivity.Ready { - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - b.shutdownRemainingLocked(sd) - if !b.addressList.seekTo(sd.addr) { - // This should not fail as we should have only one SubConn after - // entering READY. The SubConn should be present in the addressList. - b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) - return - } - if !b.healthCheckingEnabled { - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) - } - - sd.effectiveState = connectivity.Ready - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - return - } - if b.logger.V(2) { - b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) - } - // Send a CONNECTING update to take the SubConn out of sticky-TF if - // required. - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { - b.updateSubConnHealthState(sd, scs) - }) - return - } - - // If the LB policy is READY, and it receives a subchannel state change, - // it means that the READY subchannel has failed. - // A SubConn can also transition from CONNECTING directly to IDLE when - // a transport is successfully created, but the connection fails - // before the SubConn can send the notification for READY. We treat - // this as a successful connection and transition to IDLE. - // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second - // part of the if condition below once the issue is fixed. - if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { - // Once a transport fails, the balancer enters IDLE and starts from - // the first address when the picker is used. - b.shutdownRemainingLocked(sd) - sd.effectiveState = newState.ConnectivityState - // READY SubConn interspliced in between CONNECTING and IDLE, need to - // account for that. - if oldState == connectivity.Connecting { - // A known issue (https://github.com/grpc/grpc-go/issues/7862) - // causes a race that prevents the READY state change notification. - // This works around it. - connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) - } - disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) - b.addressList.reset() - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, - }) - return - } - - if b.firstPass { - switch newState.ConnectivityState { - case connectivity.Connecting: - // The effective state can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in - // TRANSIENT_FAILURE until it's READY. See A62. 
- if sd.effectiveState != connectivity.TransientFailure { - sd.effectiveState = connectivity.Connecting - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - } - case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError - sd.effectiveState = connectivity.TransientFailure - // Since we're re-using common SubConns while handling resolver - // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. Happy Eyeballs will also - // cause out of order updates to arrive. - - if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - b.cancelConnectionTimer() - if b.addressList.increment() { - b.requestConnectionLocked() - return - } - } - - // End the first pass if we've seen a TRANSIENT_FAILURE from all - // SubConns once. - b.endFirstPassIfPossibleLocked(newState.ConnectionError) - } - return - } - - // We have finished the first pass, keep re-connecting failing SubConns. - switch newState.ConnectivityState { - case connectivity.TransientFailure: - b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError - if b.numTF%b.subConns.Len() == 0 { - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: newState.ConnectionError}, - }) - } - // We don't need to request re-resolution since the SubConn already - // does that before reporting TRANSIENT_FAILURE. - // TODO: #7534 - Move re-resolution requests from SubConn into - // pick_first. - case connectivity.Idle: - sd.subConn.Connect() - } -} - -// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the -// addresses are tried and their SubConns have reported a failure. -func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { - // An optimization to avoid iterating over the entire SubConn map. - if b.addressList.isValid() { - return - } - // Connect() has been called on all the SubConns. The first pass can be - // ended if all the SubConns have reported a failure. - for _, sd := range b.subConns.Values() { - if !sd.connectionFailedInFirstPass { - return - } - } - b.firstPass = false - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: lastErr}, - }) - // Start re-connecting all the SubConns that are already in IDLE. - for _, sd := range b.subConns.Values() { - if sd.rawConnectivityState == connectivity.Idle { - sd.subConn.Connect() - } - } -} - -func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { - activeSD, found := b.subConns.Get(sd.addr) - return found && activeSD == sd -} - -func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { - b.mu.Lock() - defer b.mu.Unlock() - // Previously relevant SubConns can still callback with state updates. - // To prevent pickers from returning these obsolete SubConns, this logic - // is included to check if the current list of active SubConns includes - // this SubConn. 
- if !b.isActiveSCData(sd) { - return - } - sd.effectiveState = state.ConnectivityState - switch state.ConnectivityState { - case connectivity.Ready: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, - }) - case connectivity.TransientFailure: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, - }) - case connectivity.Connecting: - b.updateBalancerState(balancer.State{ - ConnectivityState: connectivity.Connecting, - Picker: &picker{err: balancer.ErrNoSubConnAvailable}, - }) - default: - b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) - } -} - -// updateBalancerState stores the state reported to the channel and calls -// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate -// updates to the channel. -func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { - // In case of TransientFailures allow the picker to be updated to update - // the connectivity error, in all other cases don't send duplicate state - // updates. - if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { - return - } - b.forceUpdateConcludedStateLocked(newState) -} - -// forceUpdateConcludedStateLocked stores the state reported to the channel and -// calls ClientConn.UpdateState(). -// A separate function is defined to force update the ClientConn state since the -// channel doesn't correctly assume that LB policies start in CONNECTING and -// relies on LB policy to send an initial CONNECTING update. -func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { - b.state = newState.ConnectivityState - b.cc.UpdateState(newState) -} - -type picker struct { - result balancer.PickResult - err error -} - -func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return p.result, p.err -} - -// idlePicker is used when the SubConn is IDLE and kicks the SubConn into -// CONNECTING when Pick is called. -type idlePicker struct { - exitIdle func() -} - -func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - i.exitIdle() - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} - -// addressList manages sequentially iterating over addresses present in a list -// of endpoints. It provides a 1 dimensional view of the addresses present in -// the endpoints. -// This type is not safe for concurrent access. -type addressList struct { - addresses []resolver.Address - idx int -} - -func (al *addressList) isValid() bool { - return al.idx < len(al.addresses) -} - -func (al *addressList) size() int { - return len(al.addresses) -} - -// increment moves to the next index in the address list. -// This method returns false if it went off the list, true otherwise. -func (al *addressList) increment() bool { - if !al.isValid() { - return false - } - al.idx++ - return al.idx < len(al.addresses) -} - -// currentAddress returns the current address pointed to in the addressList. -// If the list is in an invalid state, it returns an empty address instead. 
-func (al *addressList) currentAddress() resolver.Address { - if !al.isValid() { - return resolver.Address{} - } - return al.addresses[al.idx] -} - -func (al *addressList) reset() { - al.idx = 0 -} - -func (al *addressList) updateAddrs(addrs []resolver.Address) { - al.addresses = addrs - al.reset() -} - -// seekTo returns false if the needle was not found and the current index was -// left unchanged. -func (al *addressList) seekTo(needle resolver.Address) bool { - for ai, addr := range al.addresses { - if !equalAddressIgnoringBalAttributes(&addr, &needle) { - continue - } - al.idx = ai - return true - } - return false -} - -// hasNext returns whether incrementing the addressList will result in moving -// past the end of the list. If the list has already moved past the end, it -// returns false. -func (al *addressList) hasNext() bool { - if !al.isValid() { - return false - } - return al.idx+1 < len(al.addresses) -} - -// equalAddressIgnoringBalAttributes returns true is a and b are considered -// equal. This is different from the Equal method on the resolver.Address type -// which considers all fields to determine equality. Here, we only consider -// fields that are meaningful to the SubConn. -func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { - return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) -} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 22045bf3..22e6e326 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/endpointsharding" - "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/grpclog" internalgrpclog "google.golang.org/grpc/internal/grpclog" ) @@ -47,7 +47,7 @@ func (bb builder) Name() string { } func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - childBuilder := balancer.Get(pickfirstleaf.Name).Build + childBuilder := balancer.Get(pickfirst.Name).Build bal := &rrBalancer{ cc: cc, Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), @@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. - ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState), }) } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 948a21ef..2c760e62 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func( if acbw.ccb.cc.dopts.disableHealthCheck { return noOpRegisterHealthListenerFn } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } regHealthLisFn := internal.RegisterClientHealthCheckListener if regHealthLisFn == nil { // The health package is not imported. 
- return noOpRegisterHealthListenerFn - } - cfg := acbw.ac.cc.healthCheckConfig() - if cfg == nil { + channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.") return noOpRegisterHealthListenerFn } return func(ctx context.Context, listener func(balancer.SubConnState)) func() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index b1364a03..42c61cf9 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 3f762285..c0c2c9a7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -40,11 +40,12 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/stats" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. @@ -210,7 +211,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper() - cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) @@ -456,7 +458,7 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) cc.channelz = channelz.RegisterChannel(parentChannel, target) - cc.addTraceEvent("created") + cc.addTraceEvent(fmt.Sprintf("created for target %q", target)) } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -621,7 +623,8 @@ type ClientConn struct { channelz *channelz.Channel // Channelz object. resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager - metricsRecorderList *stats.MetricsRecorderList + metricsRecorderList *istats.MetricsRecorderList + statsHandler stats.Handler // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index c8e337cd..06f6c6c7 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -44,8 +44,7 @@ type PerRPCCredentials interface { // A54). 
uri is the URI of the entry point for the request. When supported // by the underlying implementation, ctx can be used for timeout and // cancellation. Additionally, RequestInfo data will be available via ctx - // to this call. TODO(zhaoq): Define the set of the qualified keys instead - // of leaving it as an arbitrary string. + // to this call. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 11d0ae14..dadd21e4 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -27,8 +27,10 @@ package encoding import ( "io" + "slices" "strings" + "google.golang.org/grpc/encoding/internal" "google.golang.org/grpc/internal/grpcutil" ) @@ -36,6 +38,24 @@ import ( // It is intended for grpc internal use only. const Identity = "identity" +func init() { + internal.RegisterCompressorForTesting = func(c Compressor) func() { + name := c.Name() + curCompressor, found := registeredCompressor[name] + RegisterCompressor(c) + return func() { + if found { + registeredCompressor[name] = curCompressor + return + } + delete(registeredCompressor, name) + grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool { + return s == name + }) + } + } +} + // Compressor is used for compressing and decompressing when sending or // receiving messages. // diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go new file mode 100644 index 00000000..ee9acb43 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the encoding package. +package internal + +// RegisterCompressorForTesting registers a compressor in the global compressor +// registry. It returns a cleanup function that should be called at the end +// of the test to unregister the compressor. +// +// This prevents compressors registered in one test from appearing in the +// encoding headers of subsequent tests. 
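The hook declared just below gives tests a way to register a compressor and receive a cleanup function that restores the previous registry state. The public path it wraps is encoding.RegisterCompressor; a minimal sketch of that path, using an illustrative no-op compressor (the type and registered name are assumptions, not part of this change):

package example

import (
	"io"

	"google.golang.org/grpc/encoding"
)

// passthroughCompressor is an illustrative encoding.Compressor that performs
// no real compression; it exists only to show the registration pattern.
type passthroughCompressor struct{}

func (passthroughCompressor) Name() string { return "example-passthrough" }

func (passthroughCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return nopWriteCloser{w}, nil
}

func (passthroughCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return r, nil
}

type nopWriteCloser struct{ io.Writer }

func (nopWriteCloser) Close() error { return nil }

func init() {
	// Registration mutates a global registry and is not synchronized, so it
	// is done at init time, matching the contract on encoding.RegisterCompressor.
	encoding.RegisterCompressor(passthroughCompressor{})
}

A test built on the new internal hook would register the same way and defer the returned cleanup, so the compressor does not leak into the encoding headers of later tests.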
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func() diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index ceec319d..1ab874c7 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } + // Important: if we remove this Size call then we cannot use + // UseCachedSize in MarshalOptions below. size := proto.Size(vv) + + // MarshalOptions with UseCachedSize allows reusing the result from the + // previous Size call. This is safe here because: + // + // 1. We just computed the size. + // 2. We assume the message is not being mutated concurrently. + // + // Important: If the proto.Size call above is removed, using UseCachedSize + // becomes unsafe and may lead to incorrect marshaling. + // + // For more details, see the doc of UseCachedSize: + // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions + marshalOptions := proto.MarshalOptions{UseCachedSize: true} + if mem.IsBelowBufferPoolingThreshold(size) { - buf, err := proto.Marshal(vv) + buf, err := marshalOptions.Marshal(vv) if err != nil { return nil, err } @@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { } else { pool := mem.DefaultBufferPool() buf := pool.Get(size) - if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil { pool.Put(buf) return nil, err } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index ad75313a..2b57ba65 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -75,6 +75,7 @@ const ( MetricTypeIntHisto MetricTypeFloatHisto MetricTypeIntGauge + MetricTypeIntUpDownCount ) // Int64CountHandle is a typed handle for a int count metric. This handle @@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels . recorder.RecordInt64Count(h, incr, labels...) } +// Int64UpDownCountHandle is a typed handle for an int up-down counter metric. +// This handle is passed at the recording point in order to know which metric +// to record on. +type Int64UpDownCountHandle MetricDescriptor + +// Descriptor returns the int64 up-down counter handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 up-down counter value on the metrics recorder provided. +// The value 'v' can be positive to increment or negative to decrement. +func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) { + recorder.RecordInt64UpDownCount(h, v, labels...) +} + // Float64CountHandle is a typed handle for a float count metric. This handle is // passed at the recording point in order to know which metric to record on. type Float64CountHandle MetricDescriptor @@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { return (*Int64GaugeHandle)(descPtr) } +// RegisterInt64UpDownCount registers the metric description onto the global registry. 
+// It returns a typed handle to use for recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle { + registerMetric(descriptor.Name, descriptor.Default) + // Set the specific metric type for the up-down counter + descriptor.Type = MetricTypeIntUpDownCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64UpDownCountHandle)(descPtr) +} + // snapshotMetricsRegistryForTesting snapshots the global data of the metrics // registry. Returns a cleanup function that sets the metrics registry to its // original state. diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index ee142360..cb57f1a7 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -38,6 +38,9 @@ type MetricsRecorder interface { // RecordInt64Gauge records the measurement alongside labels on the int // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) + // RecordInt64UpDownCounter records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string) } // Metrics is an experimental legacy alias of the now-stable stats.MetricSet. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 22d263fb..8f7d9f6b 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 11f91668..467392b8 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -83,6 +83,7 @@ func (b *Unbounded) Load() { default: } } else if b.closing && !b.closed { + b.closed = true close(b.c) } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 2bffe477..3b7ba596 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -194,7 +194,7 @@ func (r RefChannelType) String() string { // If channelz is not turned ON, this will simply log the event descriptions. func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { // Log only the trace description associated with the bottom most entity. 
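The up-down counter support added above spans the registry (RegisterInt64UpDownCount), the typed handle, and the MetricsRecorder interface; the recorder-list implementation appears further down in this diff. A rough sketch of how a component could register and record such a metric, with the descriptor values invented for illustration:

package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// activeStreamsMetric is an illustrative up-down counter; the descriptor
// values are invented for this sketch and are not metrics defined by this change.
var activeStreamsMetric = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
	Name:        "example.client.active_streams",
	Description: "Number of streams currently open, for illustration only.",
	Unit:        "{stream}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordStreamOpen and recordStreamClose show the +1/-1 pattern against any
// MetricsRecorder implementation (for example the internal recorder list).
func recordStreamOpen(rec estats.MetricsRecorder, target string) {
	activeStreamsMetric.Record(rec, 1, target)
}

func recordStreamClose(rec estats.MetricsRecorder, target string) {
	activeStreamsMetric.Record(rec, -1, target)
}

The same handle records both increments and decrements, which is what distinguishes it from the existing Int64Count handles.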
- d := fmt.Sprintf("[%s]%s", e, desc.Desc) + d := fmt.Sprintf("[%s] %s", e, desc.Desc) switch desc.Severity { case CtUnknown, CtInfo: l.InfoDepth(depth+1, d) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7e060f5e..91f76093 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -52,12 +52,6 @@ var ( // or "false". EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) - // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "false". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) - // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the @@ -75,6 +69,14 @@ var ( // ALTSHandshakerKeepaliveParams is set if we should add the // KeepaliveParams when dial the ALTS handshaker service. ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false) + + // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443 + // to a target address that lacks one. This flag only has an effect when all of + // the following conditions are met: + // - A connect proxy is being used. + // - Target resolution is disabled. + // - The DNS resolver is being used. + EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index e8755155..7685d08b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -68,4 +68,15 @@ var ( // trust. For more details, see: // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false) + + // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata + // configuring use of an HTTP CONNECT proxy via xDS from cluster resources. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md + XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false) + + // XDSBootstrapCallCredsEnabled controls if call credentials can be used in + // xDS bootstrap configuration via the `call_creds` field. 
For more details, + // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md + XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 8e8e8612..9b6d8a1f 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func (cs *CallbackSerializer) run(ctx context.Context) { defer close(cs.done) - // TODO: when Go 1.21 is the oldest supported version, this loop and Close - // can be replaced with: - // - // context.AfterFunc(ctx, cs.callbacks.Close) - for ctx.Err() == nil { - select { - case <-ctx.Done(): - // Do nothing here. Next iteration of the for loop will not happen, - // since ctx.Err() would be non-nil. - case cb := <-cs.callbacks.Get(): - cs.callbacks.Load() - cb.(func(context.Context))(ctx) - } - } - - // Close the buffer to prevent new callbacks from being added. - cs.callbacks.Close() + // Close the buffer when the context is canceled + // to prevent new callbacks from being added. + context.AfterFunc(ctx, cs.callbacks.Close) - // Run all pending callbacks. + // Run all callbacks. for cb := range cs.callbacks.Get() { cs.callbacks.Load() cb.(func(context.Context))(ctx) diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go index 20b8fb09..5bfa67b7 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -22,11 +22,13 @@ package delegatingresolver import ( "fmt" + "net" "net/http" "net/url" "sync" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/proxyattributes" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport/networktype" @@ -40,6 +42,8 @@ var ( HTTPSProxyFromEnvironment = http.ProxyFromEnvironment ) +const defaultPort = "443" + // delegatingResolver manages both target URI and proxy address resolution by // delegating these tasks to separate child resolvers. Essentially, it acts as // an intermediary between the gRPC ClientConn and the child resolvers. 
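The hunks that follow make the delegating resolver normalize a DNS target to host:port form before consulting the proxy environment, guarded by the new GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET flag and applied only when a connect proxy is in use, target resolution is disabled, and the DNS resolver is selected. A condensed, standalone sketch of the same normalization (the helper name and hard-coded port are for illustration):

package example

import (
	"fmt"
	"net"
)

// withDefaultPort mirrors the normalization added below: targets that already
// carry a port are returned unchanged, a missing port falls back to 443, and
// an empty host defaults to localhost.
func withDefaultPort(target string) (string, error) {
	if target == "" {
		return "", fmt.Errorf("missing address")
	}
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// Most likely the port is absent (e.g. "example.com" or "::1").
		return net.JoinHostPort(target, "443"), nil
	}
	if port == "" {
		// A trailing colon with no port, e.g. "example.com:".
		return "", fmt.Errorf("missing port after port-separator colon")
	}
	if host == "" {
		// Targets like ":80" default the host to localhost.
		host = "localhost"
	}
	return net.JoinHostPort(host, port), nil
}

For example, withDefaultPort("example.com") yields "example.com:443", while ":80" becomes "localhost:80".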
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti targetResolver: nopResolver{}, } + addr := target.Endpoint() var err error - r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget { + addr, err = parseTarget(addr) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err) + } + } + + r.proxyURL, err = proxyURLForTarget(addr) if err != nil { - return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err) } // proxy is not configured or proxy address excluded using `NO_PROXY` env @@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti // bypass the target resolver and store the unresolved target address. if target.URL.Scheme == "dns" && !targetResolutionEnabled { r.targetResolverState = &resolver.State{ - Addresses: []resolver.Address{{Addr: target.Endpoint()}}, - Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + Addresses: []resolver.Address{{Addr: addr}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}}, } r.updateTargetResolverState(*r.targetResolverState) return r, nil @@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool { return false } +// parseTarget takes a target string and ensures it is a valid "host:port" target. +// +// It does the following: +// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"), +// it is returned as is. +// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost", +// returning "localhost:80". +// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort +// is added. +// +// An error is returned for empty targets or targets with a trailing colon +// but no port (e.g., "host:"). +func parseTarget(target string) (string, error) { + if target == "" { + return "", fmt.Errorf("missing address") + } + + host, port, err := net.SplitHostPort(target) + if err != nil { + // If SplitHostPort fails, it's likely because the port is missing. + // We append the default port and return the result. + return net.JoinHostPort(target, defaultPort), nil + } + + // If SplitHostPort succeeds, we check for edge cases. + if port == "" { + // A success with an empty port means the target had a trailing colon, + // e.g., "host:", which is an error. + return "", fmt.Errorf("missing port after port-separator colon") + } + if host == "" { + // A success with an empty host means the target was like ":80". + // We default the host to "localhost". + host = "localhost" + } + return net.JoinHostPort(host, port), nil +} + func skipProxy(address resolver.Address) bool { // Avoid proxy when network is not tcp. 
networkType, ok := networktype.Get(address) diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index 79044657..d5f7e4d6 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordInt64UpDownCount records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64UpDownCount(handle, incr, labels...) + } +} + // RecordFloat64Count records the measurement alongside labels on the float // count associated with the provided handle. func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go new file mode 100644 index 00000000..49019b80 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/stats.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2025 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + + "google.golang.org/grpc/stats" +) + +type combinedHandler struct { + handlers []stats.Handler +} + +// NewCombinedHandler combines multiple stats.Handlers into a single handler. +// +// It returns nil if no handlers are provided. If only one handler is +// provided, it is returned directly without wrapping. 
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler { + switch len(handlers) { + case 0: + return nil + case 1: + return handlers[0] + default: + return &combinedHandler{handlers: handlers} + } +} + +func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagRPC(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) { + for _, h := range ch.handlers { + h.HandleRPC(ctx, stats) + } +} + +func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { + for _, h := range ch.handlers { + ctx = h.TagConn(ctx, info) + } + return ctx +} + +func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) { + for _, h := range ch.handlers { + h.HandleConn(ctx, stats) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go index ccc0e017..98045251 100644 --- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -29,25 +29,27 @@ import ( // ClientStream implements streaming functionality for a gRPC client. type ClientStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. ct *http2Client done chan struct{} // closed at the end of stream to unblock writers. doneFunc func() // invoked at the end of stream. - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + headerChan chan struct{} // closed to indicate the end of header metadata. + header metadata.MD // the received header metadata + + status *status.Status // the status error received from the server + + // Non-pointer fields are at the end to optimize GC allocations. + // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). - headerValid bool - header metadata.MD // the received header metadata - noHeaders bool // set if the client never received headers (set only after the stream is done). - - bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream - unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream - - status *status.Status // the status error received from the server + headerValid bool + noHeaders bool // set if the client never received headers (set only after the stream is done). + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream } // Read reads an n byte message from the input stream. 
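NewCombinedHandler above lets the ClientConn and the transports hold a single stats.Handler no matter how many were supplied as dial options (see the clientconn.go and http2_client.go hunks in this diff). Nothing changes on the caller's side; a small sketch of supplying two handlers, with the handler type and target purely illustrative:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// loggingHandler is a trivial stats.Handler used only for illustration.
type loggingHandler struct{ name string }

func (l loggingHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (l loggingHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: %T", l.name, s)
}

func (l loggingHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (l loggingHandler) HandleConn(context.Context, stats.ConnStats) {}

// dialWithTwoHandlers passes two stats handlers; with this change the channel
// wraps them in one combined handler internally while both still receive
// every Tag*/Handle* callback.
func dialWithTwoHandlers(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(loggingHandler{name: "first"}),
		grpc.WithStatsHandler(loggingHandler{name: "second"}),
	)
}

The combination only collapses the per-callback loop into a single handler value held by the ClientConn and transports; observable behavior for each registered handler is unchanged.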
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool { func (s *ClientStream) Status() *status.Status { return s.status } + +func (s *ClientStream) requestRead(n int) { + s.ct.adjustWindow(s, uint32(n)) +} + +func (s *ClientStream) updateWindow(n int) { + s.ct.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index a2831e5d..2dcd1e63 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -496,6 +496,16 @@ const ( serverSide ) +// maxWriteBufSize is the maximum length (number of elements) the cached +// writeBuf can grow to. The length depends on the number of buffers +// contained within the BufferSlice produced by the codec, which is +// generally small. +// +// If a writeBuf larger than this limit is required, it will be allocated +// and freed after use, rather than being cached. This avoids holding +// on to large amounts of memory. +const maxWriteBufSize = 64 + // Loopy receives frames from the control buffer. // Each frame is handled individually; most of the work done by loopy goes // into handling data frames. Loopy maintains a queue of active streams, and each @@ -530,6 +540,8 @@ type loopyWriter struct { // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) + + writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek. } func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { @@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } l.estdStreams[h.streamID] = str } @@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { } // Case 2: Client wants to originate stream. str := &outStream{ - id: h.streamID, - state: empty, - itl: &itemList{}, - wq: h.wq, - reader: mem.BufferSlice{}.Reader(), + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, } return l.originateStream(str, h) } @@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) { if str == nil { return true, nil } - reader := str.reader + reader := &str.reader dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. 
if !dataItem.processing { dataItem.processing = true - str.reader.Reset(dataItem.data) + reader.Reset(dataItem.data) dataItem.data.Free() } // A data item is represented by a dataFrame, since it later translates into @@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) { if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true - if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream - _ = reader.Close() + reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) { remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize size := hSize + dSize - var buf *[]byte - - if hSize != 0 && dSize == 0 { - buf = &dataItem.h - } else { - // Note: this is only necessary because the http2.Framer does not support - // partially writing a frame, so the sequence must be materialized into a buffer. - // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. - pool := l.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() + l.writeBuf = l.writeBuf[:0] + if hSize > 0 { + l.writeBuf = append(l.writeBuf, dataItem.h[:hSize]) + } + if dSize > 0 { + var err error + l.writeBuf, err = reader.Peek(dSize, l.writeBuf) + if err != nil { + // This must never happen since the reader must have at least dSize + // bytes. + // Log an error to fail tests. + l.logger.Errorf("unexpected error while reading Data frame payload: %v", err) + return false, err } - buf = pool.Get(size) - defer pool.Put(buf) - - copy((*buf)[:hSize], dataItem.h) - _, _ = reader.Read((*buf)[hSize:]) } // Now that outgoing flow controls are checked we can replenish str's write quota @@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) { if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf) + reader.Discard(dSize) + if cap(l.writeBuf) > maxWriteBufSize { + l.writeBuf = nil + } else { + clear(l.writeBuf) + } + if err != nil { return false, err } str.bytesOutStanding += size @@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) { dataItem.h = dataItem.h[hSize:] if remainingBytes == 0 { // All the data from that message was written out. - _ = reader.Close() + reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index dfc0f224..7cfbc963 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -28,7 +28,7 @@ import ( // writeQuota is a soft limit on the amount of data a stream can // schedule before some of it is written out. type writeQuota struct { - quota int32 + _ noCopy // get waits on read from when quota goes less than or equal to zero. // replenish writes on it when quota goes positive again. 
ch chan struct{} @@ -38,16 +38,17 @@ type writeQuota struct { // It is implemented as a field so that it can be updated // by tests. replenish func(n int) + quota int32 } -func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { - w := &writeQuota{ - quota: sz, - ch: make(chan struct{}, 1), - done: done, - } +// init allows a writeQuota to be initialized in-place, which is useful for +// resetting a buffer or for avoiding a heap allocation when the buffer is +// embedded in another struct. +func (w *writeQuota) init(sz int32, done <-chan struct{}) { + w.quota = sz + w.ch = make(chan struct{}, 1) + w.done = done w.replenish = w.realReplenish - return w } func (w *writeQuota) get(sz int32) error { @@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error { func (w *writeQuota) realReplenish(n int) { sz := int32(n) - a := atomic.AddInt32(&w.quota, sz) - b := a - sz - if b <= 0 && a > 0 { + newQuota := atomic.AddInt32(&w.quota, sz) + previousQuota := newQuota - sz + if previousQuota <= 0 && newQuota > 0 { select { case w.ch <- struct{}{}: default: diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d954a64c..7ab3422b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -170,7 +170,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats stats.Handler logger *grpclog.PrefixLogger bufferPool mem.BufferPool @@ -274,15 +274,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status } }) - if err == nil { // transport has not been closed + if err == nil && ht.stats != nil { // transport has not been closed // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. s.hdrMu.Lock() - for _, sh := range ht.stats { - sh.HandleRPC(s.Context(), &stats.OutTrailer{ - Trailer: s.trailer.Copy(), - }) - } + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) s.hdrMu.Unlock() } ht.Close(errors.New("finished writing status")) @@ -374,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e ht.rw.(http.Flusher).Flush() }) - if err == nil { - for _, sh := range ht.stats { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutHeader{ - Header: md.Copy(), - Compression: s.sendCompress, - }) - } + if err == nil && ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } return err } +func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) { +} + +func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) { +} + func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc @@ -411,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req s := &ServerStream{ - Stream: &Stream{ + Stream: Stream{ id: 0, // irrelevant ctx: ctx, - requestRead: func(int) {}, - buf: newRecvBuffer(), method: req.URL.Path, recvCompress: req.Header.Get("grpc-encoding"), contentSubtype: ht.contentSubtype, @@ -424,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream st: ht, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } - s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, - windowHandler: func(int) {}, + s.Stream.buf.init() + s.readRequester = s + s.trReader = transportReader{ + reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf}, + windowHandler: s, } // readerDone is closed when the Body.Read-ing goroutine exits. diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5467fe97..65b4ab24 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -44,6 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/internal/proxyattributes" + istats "google.golang.org/grpc/internal/stats" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -105,7 +106,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandlers []stats.Handler + statsHandler stats.Handler initialWindowSize int32 @@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts writerDone: make(chan struct{}), goAway: make(chan struct{}), keepaliveDone: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandlers: opts.StatsHandlers, + statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...), initialWindowSize: initialWindowSize, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, @@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.statsHandlers { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) - connBegin := &stats.ConnBegin{ + 
t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{ Client: true, - } - sh.HandleConn(t.ctx, connBegin) + }) } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &ClientStream{ - Stream: &Stream{ + Stream: Stream{ method: callHdr.Method, sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), contentSubtype: callHdr.ContentSubtype, }, ct: t, @@ -492,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt headerChan: make(chan struct{}), doneFunc: callHdr.DoneFunc, } - s.wq = newWriteQuota(defaultWriteQuota, s.done) - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.Stream.buf.init() + s.Stream.wq.init(defaultWriteQuota, s.done) + s.readRequester = s // The client side stream context should have exactly the same life cycle with the user provided context. // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. // So we use the original context here instead of creating a copy. s.ctx = ctx - s.trReader = &transportReader{ - reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctx.Done(), - recv: s.buf, - closeStream: func(err error) { - s.Close(err) - }, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + s.trReader = transportReader{ + reader: recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: &s.buf, + clientStream: s, }, + windowHandler: s, } return s } @@ -556,6 +550,19 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) // Make the slice of certain predictable size to reduce allocations made by append. 
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te hfLen += len(authData) + len(callAuthData) + registeredCompressors := t.registeredCompressors + if callHdr.PreviousAttempts > 0 { + hfLen++ + } + if callHdr.SendCompress != "" { + hfLen++ + } + if registeredCompressors != "" { + hfLen++ + } + if _, ok := ctx.Deadline(); ok { + hfLen++ + } headerFields := make([]hpack.HeaderField, 0, hfLen) headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) @@ -568,7 +575,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } - registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) // Include the outgoing compressor name when compressor is not registered @@ -811,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil }, onOrphaned: cleanup, - wq: s.wq, + wq: &s.wq, } firstTry := true var ch chan struct{} @@ -842,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS transportDrainRequired = t.nextID > MaxStreamID s.id = hdr.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + s.fc = inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() @@ -893,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if len(t.statsHandlers) != 0 { + if t.statsHandler != nil { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - for _, sh := range t.statsHandlers { - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - // Note: Creating a new stats object to prevent pollution. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, - } - sh.HandleRPC(s.ctx, outHeader) - } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + }) } if transportDrainRequired { if t.logger.V(logLevel) { @@ -990,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode // accessed anymore. func (t *http2Client) Close(err error) { t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + // For background on the deadline value chosen here, see + // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 . + t.conn.SetReadDeadline(time.Now().Add(time.Second)) t.mu.Lock() // Make sure we only close once. 
if t.state == closing { @@ -1051,11 +1056,10 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - for _, sh := range t.statsHandlers { - connEnd := &stats.ConnEnd{ + if t.statsHandler != nil { + t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{ Client: true, - } - sh.HandleConn(t.ctx, connEnd) + }) } } @@ -1166,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) { }) } -func (t *http2Client) handleData(f *http2.DataFrame) { +func (t *http2Client) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -1210,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. - pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } // The server has closed the stream without sending trailers. Record that @@ -1465,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string recvCompress string - httpStatusCode *int httpStatusErr string - rawStatusCode = codes.Unknown + // the code from the grpc-status header, if present + grpcStatusCode = codes.Unknown // headerError is set if an error is encountered while parsing the headers headerError string + httpStatus string ) - if initialHeader { - httpStatusErr = "malformed header: missing HTTP status" - } - for _, hf := range frame.Fields { switch hf.Name { case "content-type": @@ -1495,31 +1489,11 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) return } - rawStatusCode = codes.Code(uint32(code)) + grpcStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) case ":status": - if hf.Value == "200" { - httpStatusErr = "" - statusCode := 200 - httpStatusCode = &statusCode - break - } - - c, err := strconv.ParseInt(hf.Value, 10, 32) - if err != nil { - se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) - t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) - return - } - statusCode := int(c) - httpStatusCode = &statusCode - - httpStatusErr = fmt.Sprintf( - "unexpected HTTP status code received from server: %d (%s)", - statusCode, - http.StatusText(statusCode), - ) + httpStatus = hf.Value default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break @@ -1534,25 +1508,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } } - if !isGRPC || httpStatusErr != "" { - var code = codes.Internal // when header does not include HTTP status, return INTERNAL - 
- if httpStatusCode != nil {
+ // If a non-gRPC response is received, then evaluate the HTTP status to
+ // process the response and close the stream.
+ // In case the http status doesn't provide any error information (status: 200),
+ // then evaluate the response code to be Unknown.
+ if !isGRPC {
+ var grpcErrorCode = codes.Internal
+ if httpStatus == "" {
+ httpStatusErr = "malformed header: missing HTTP status"
+ } else {
+ // Parse the status code (e.g. "200", "404").
+ statusCode, err := strconv.Atoi(httpStatus)
+ if err != nil {
+ se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ if statusCode >= 100 && statusCode < 200 {
+ if endStream {
+ se := status.New(codes.Internal, fmt.Sprintf(
+ "protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ }
+ // In case of informational headers, return.
+ return
+ }
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
+ grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
if !ok {
- code = codes.Unknown
+ grpcErrorCode = codes.Unknown
}
}
var errs []string
if httpStatusErr != "" {
errs = append(errs, httpStatusErr)
}
+
if contentTypeErr != "" {
errs = append(errs, contentTypeErr)
}
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
+
+ se := status.New(grpcErrorCode, strings.Join(errs, "; "))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
@@ -1583,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- for _, sh := range t.statsHandlers {
+ if t.statsHandler != nil {
if !endStream {
- inHeader := &stats.InHeader{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ })
} else {
- inTrailer := &stats.InTrailer{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
+ })
}
}
@@ -1606,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+ status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
@@ -1653,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) {
// loop to keep reading incoming messages on this transport.
for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() if t.keepaliveEnabled { atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } @@ -1668,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) { if s != nil { // use error detail to provide better err message code := http2ErrConvTab[se.Code] - errorDetail := t.framer.fr.ErrorDetail() + errorDetail := t.framer.errorDetail() var msg string if errorDetail != nil { msg = errorDetail.Error() @@ -1686,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: t.operateHeaders(frame) - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83cee314..6f78a6b0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,8 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/protobuf/proto" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" @@ -42,7 +44,6 @@ import ( istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/mem" - "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -86,7 +87,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats []stats.Handler + stats stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool) // Send initial settings as connection preface to client. 
isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*ServerStream), - stats: config.StatsHandlers, + stats: config.StatsHandler, kp: kp, idle: time.Now(), kep: kep, @@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.maxStreamID = streamID - buf := newRecvBuffer() s := &ServerStream{ - Stream: &Stream{ - id: streamID, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + Stream: Stream{ + id: streamID, + fc: inFlow{limit: uint32(t.initialWindowSize)}, }, st: t, headerWireLength: int(frame.Header().Length), } + s.Stream.buf.init() var ( // if false, content-type was missing or invalid isGRPC = false @@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.channelz.SocketMetrics.StreamsStarted.Add(1) t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } - s.requestRead = func(n int) { - t.adjustWindow(s, uint32(n)) - } + s.readRequester = s s.ctxDone = s.ctx.Done() - s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) - s.trReader = &transportReader{ - reader: &recvBufferReader{ + s.Stream.wq.init(defaultWriteQuota, s.ctxDone) + s.trReader = transportReader{ + reader: recvBufferReader{ ctx: s.ctx, ctxDone: s.ctxDone, - recv: s.buf, - }, - windowHandler: func(n int) { - t.updateWindow(s, uint32(n)) + recv: &s.buf, }, + windowHandler: s, } // Register the stream with loopy. t.controlBuf.put(®isterStream{ streamID: s.id, - wq: s.wq, + wq: &s.wq, }) handle(s) return nil @@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }() for { t.controlBuf.throttle() - frame, err := t.framer.fr.ReadFrame() + frame, err := t.framer.readFrame() atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { @@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre }) continue } - case *http2.DataFrame: + case *parsedDataFrame: t.handleData(frame) + frame.data.Free() case *http2.RSTStreamFrame: t.handleRSTStream(frame) case *http2.SettingsFrame: @@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) { } -func (t *http2Server) handleData(f *http2.DataFrame) { +func (t *http2Server) handleData(f *parsedDataFrame) { size := f.Header().Length var sendBDPPing bool if t.bdpEst != nil { @@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) { t.closeStream(s, true, http2.ErrCodeFlowControl, false) return } + dataLen := f.data.Len() if f.Header().Flags.Has(http2.FlagDataPadded) { - if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + if w := s.fc.onRead(size - uint32(dataLen)); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) } } - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - if len(f.Data()) > 0 { - pool := t.bufferPool - if pool == nil { - // Note that this is only supposed to be nil in tests. Otherwise, stream is - // always initialized with a BufferPool. 
- pool = mem.DefaultBufferPool() - } - s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + if dataLen > 0 { + f.data.Ref() + s.write(recvMsg{buffer: f.data}) } } if f.StreamEnded() { @@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - for _, sh := range t.stats { + if t.stats != nil { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. - outHeader := &stats.OutHeader{ + t.stats.HandleRPC(s.Context(), &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, - } - sh.HandleRPC(s.Context(), outHeader) + }) } return nil } @@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - for _, sh := range t.stats { + if t.stats != nil { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - sh.HandleRPC(s.Context(), &stats.OutTrailer{ + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() - if _, ok := t.activeStreams[s.id]; ok { + _, isActive := t.activeStreams[s.id] + if isActive { delete(t.activeStreams, s.id) if len(t.activeStreams) == 0 { t.idle = time.Now() @@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { } t.mu.Unlock() - if channelz.IsOn() { + if isActive && channelz.IsOn() { if eosReceived { t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index e3663f87..6209eb23 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -25,7 +25,6 @@ import ( "fmt" "io" "math" - "net" "net/http" "net/url" "strconv" @@ -37,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" ) const ( @@ -300,11 +300,11 @@ type bufWriter struct { buf []byte offset int batchSize int - conn net.Conn + conn io.Writer err error } -func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { +func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter { w := &bufWriter{ batchSize: batchSize, conn: conn, @@ -388,15 +388,35 @@ func toIOError(err error) error { return ioError{error: err} } +type parsedDataFrame struct { + http2.FrameHeader + data mem.Buffer +} + +func (df *parsedDataFrame) StreamEnded() bool { + return df.FrameHeader.Flags.Has(http2.FlagDataEndStream) +} + type framer struct { - writer *bufWriter - fr *http2.Framer + writer *bufWriter + fr *http2.Framer + headerBuf []byte // cached slice for framer headers to reduce heap allocs. + reader io.Reader + dataFrame parsedDataFrame // Cached data frame to avoid heap allocations. 
+ pool mem.BufferPool + errDetail error } var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { +func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer { + if memPool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream + // is always initialized with a BufferPool. + memPool = mem.DefaultBufferPool() + } + if writeBufferSize < 0 { writeBufferSize = 0 } @@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu f := &framer{ writer: w, fr: http2.NewFramer(w, r), + reader: r, + pool: memPool, } f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. @@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } +// writeData writes a DATA frame. +// +// It is the caller's responsibility not to violate the maximum frame size. +func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error { + var flags http2.Flags + if endStream { + flags = http2.FlagDataEndStream + } + length := uint32(0) + for _, d := range data { + length += uint32(len(d)) + } + // TODO: Replace the header write with the framer API being added in + // https://github.com/golang/go/issues/66655. + f.headerBuf = append(f.headerBuf[:0], + byte(length>>16), + byte(length>>8), + byte(length), + byte(http2.FrameData), + byte(flags), + byte(streamID>>24), + byte(streamID>>16), + byte(streamID>>8), + byte(streamID)) + if _, err := f.writer.Write(f.headerBuf); err != nil { + return err + } + for _, d := range data { + if _, err := f.writer.Write(d); err != nil { + return err + } + } + return nil +} + +// readFrame reads a single frame. The returned Frame is only valid +// until the next call to readFrame. +func (f *framer) readFrame() (any, error) { + f.errDetail = nil + fh, err := f.fr.ReadFrameHeader() + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + // Read the data frame directly from the underlying io.Reader to avoid + // copies. + if fh.Type == http2.FrameData { + err = f.readDataFrame(fh) + return &f.dataFrame, err + } + fr, err := f.fr.ReadFrameForHeader(fh) + if err != nil { + f.errDetail = f.fr.ErrorDetail() + return nil, err + } + return fr, err +} + +// errorDetail returns a more detailed error of the last error +// returned by framer.readFrame. For instance, if readFrame +// returns a StreamError with code PROTOCOL_ERROR, errorDetail +// will say exactly what was invalid. errorDetail is not guaranteed +// to return a non-nil value. +// errorDetail is reset after the next call to readFrame. +func (f *framer) errorDetail() error { + return f.errDetail +} + +func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + f.errDetail = errors.New("DATA frame with stream ID 0") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This + // conversion is performed by mem.NewBuffer. 
To avoid the extra allocation + // a []byte is allocated directly if required and cast to a mem.SliceBuffer. + var buf []byte + // poolHandle is the pointer returned by the buffer pool (if it's used.). + var poolHandle *[]byte + useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length)) + if useBufferPool { + poolHandle = f.pool.Get(int(fh.Length)) + buf = *poolHandle + defer func() { + if err != nil { + f.pool.Put(poolHandle) + } + }() + } else { + buf = make([]byte, int(fh.Length)) + } + if fh.Flags.Has(http2.FlagDataPadded) { + if fh.Length == 0 { + return io.ErrUnexpectedEOF + } + // This initial 1-byte read can be inefficient for unbuffered readers, + // but it allows the rest of the payload to be read directly to the + // start of the destination slice. This makes it easy to return the + // original slice back to the buffer pool. + if _, err := io.ReadFull(f.reader, buf[:1]); err != nil { + return err + } + padSize := buf[0] + buf = buf[:len(buf)-1] + if int(padSize) > len(buf) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + f.errDetail = errors.New("pad size larger than data payload") + return http2.ConnectionError(http2.ErrCodeProtocol) + } + if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + buf = buf[:len(buf)-int(padSize)] + } else if _, err := io.ReadFull(f.reader, buf); err != nil { + return err + } + + f.dataFrame.FrameHeader = fh + if useBufferPool { + // Update the handle to point to the (potentially re-sliced) buf. + *poolHandle = buf + f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool) + } else { + f.dataFrame.data = mem.SliceBuffer(buf) + } + return nil +} + +func (df *parsedDataFrame) Header() http2.FrameHeader { + return df.FrameHeader +} + func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go index cf8da0b5..ed6a13b7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -32,7 +32,7 @@ import ( // ServerStream implements streaming functionality for a gRPC server. type ServerStream struct { - *Stream // Embed for common stream functionality. + Stream // Embed for common stream functionality. st internalServerTransport ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) @@ -43,12 +43,13 @@ type ServerStream struct { // Holds compressor names passed in grpc-accept-encoding metadata from the // client. clientAdvertisedCompressors string - headerWireLength int // hdrMu protects outgoing header and trailer metadata. hdrMu sync.Mutex header metadata.MD // the outgoing header metadata. Updated by WriteHeader. headerSent atomic.Bool // atomically set when the headers are sent out. + + headerWireLength int } // Read reads an n byte message from the input stream. 
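The handleData and readDataFrame changes above replace the old copy-per-DATA-frame path with a reference-counted mem.Buffer shared between the framer and the stream: the transport calls f.data.Ref() before writing the payload to the stream, and the reader loop calls frame.data.Free() once the frame has been dispatched. A minimal sketch of that ownership contract, using only mem package APIs that already appear in this diff (the 4096-byte size, the deliver helper, and the package main scaffolding are illustrative assumptions, not part of the change):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

// deliver stands in for the stream's recvMsg consumer: it owns one reference
// to the buffer and releases it once the payload has been handled.
func deliver(b mem.Buffer) {
	fmt.Printf("received %d bytes\n", b.Len())
	b.Free()
}

func main() {
	pool := mem.DefaultBufferPool()

	// Like readDataFrame for payloads above the pooling threshold: borrow a
	// backing array from the pool and wrap it in a ref-counted mem.Buffer.
	handle := pool.Get(4096)
	copy(*handle, "frame payload")
	buf := mem.NewBuffer(handle, pool)

	// Like handleData: take an extra reference before handing the buffer to
	// the stream, so the consumer and the reader loop can release it
	// independently.
	buf.Ref()
	deliver(buf)

	// Like the reader loop's frame.data.Free() after the frame is dispatched;
	// the backing array returns to the pool when the last reference drops.
	buf.Free()
}

The same pattern shows up on both the client and server handleData paths in this diff; whichever side drops the last reference hands the backing array back to the configured BufferPool.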
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error { s.hdrMu.Unlock() return nil } + +func (s *ServerStream) requestRead(n int) { + s.st.adjustWindow(s, uint32(n)) +} + +func (s *ServerStream) updateWindow(n int) { + s.st.updateWindow(s, uint32(n)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 7dd53e80..5ff83a7d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -68,11 +68,11 @@ type recvBuffer struct { err error } -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan recvMsg, 1), - } - return b +// init allows a recvBuffer to be initialized in-place, which is useful +// for resetting a buffer or for avoiding a heap allocation when the buffer +// is embedded in another struct. +func (b *recvBuffer) init() { + b.c = make(chan recvMsg, 1) } func (b *recvBuffer) put(r recvMsg) { @@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg { // recvBufferReader implements io.Reader interface to read the data from // recvBuffer. type recvBufferReader struct { - closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. - ctx context.Context - ctxDone <-chan struct{} // cache of ctx.Done() (for performance). - recv *recvBuffer - last mem.Buffer // Stores the remaining data in the previous calls. - err error + _ noCopy + clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error } func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { @@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { n, r.last = mem.ReadUnsafe(header, r.last) return n, nil } - if r.closeStream != nil { + if r.clientStream != nil { n, r.err = r.readMessageHeaderClient(header) } else { n, r.err = r.readMessageHeader(header) @@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { } return buf, nil } - if r.closeStream != nil { + if r.clientStream != nil { buf, r.err = r.readClient(n) } else { buf, r.err = r.read(n) @@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): @@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // TODO: delaying ctx error seems like a unnecessary side effect. What // we really want is to mark the stream as done, and return ctx error // faster. - r.closeStream(ContextErr(r.ctx.Err())) + r.clientStream.Close(ContextErr(r.ctx.Err())) m := <-r.recv.get() return r.readAdditional(m, n) case m := <-r.recv.get(): @@ -285,27 +286,32 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { - id uint32 ctx context.Context // the associated context of the stream method string // the associated RPC method of the stream recvCompress string sendCompress string - buf *recvBuffer - trReader *transportReader - fc *inFlow - wq *writeQuota - - // Callback to state application's intentions to read data. This - // is used to adjust flow control, if needed. - requestRead func(int) - state streamState + readRequester readRequester // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string trailer metadata.MD // the key-value map of trailer metadata. + + // Non-pointer fields are at the end to optimize GC performance. + state streamState + id uint32 + buf recvBuffer + trReader transportReader + fc inFlow + wq writeQuota +} + +// readRequester is used to state application's intentions to read data. This +// is used to adjust flow control, if needed. +type readRequester interface { + requestRead(int) } func (s *Stream) swapState(st streamState) streamState { @@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) { if er := s.trReader.er; er != nil { return er } - s.requestRead(len(header)) + s.readRequester.requestRead(len(header)) for len(header) != 0 { n, err := s.trReader.ReadMessageHeader(header) header = header[n:] @@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { if er := s.trReader.er; er != nil { return nil, er } - s.requestRead(n) + s.readRequester.requestRead(n) for n != 0 { buf, err := s.trReader.Read(n) var bufLen int @@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) { return data, nil } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader *recvBufferReader + _ noCopy // The handler to control the window update procedure for both this // particular stream and the associated transport. - windowHandler func(int) + windowHandler windowHandler er error + reader recvBufferReader +} + +// The handler to control the window update procedure for both this +// particular stream and the associated transport. 
+type windowHandler interface { + updateWindow(int) } func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { @@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { t.er = err return 0, err } - t.windowHandler(n) + t.windowHandler.updateWindow(n) return n, nil } @@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { t.er = err return buf, err } - t.windowHandler(buf.Len()) + t.windowHandler.updateWindow(buf.Len()) return buf, nil } @@ -454,7 +478,7 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandlers []stats.Handler + StatsHandler stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 @@ -615,6 +639,8 @@ type internalServerTransport interface { write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error writeStatus(s *ServerStream, st *status.Status) error incrMsgRecv() + adjustWindow(s *ServerStream, n uint32) + updateWindow(s *ServerStream, n uint32) } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go index c37c58c0..f211e727 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_pool.go +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -32,6 +32,9 @@ type BufferPool interface { Get(length int) *[]byte // Put returns a buffer to the pool. + // + // The provided pointer must hold a prefix of the buffer obtained via + // BufferPool.Get to ensure the buffer's entire capacity can be re-used. Put(*[]byte) } @@ -118,7 +121,11 @@ type sizedBufferPool struct { } func (p *sizedBufferPool) Get(size int) *[]byte { - buf := p.pool.Get().(*[]byte) + buf, ok := p.pool.Get().(*[]byte) + if !ok { + buf := make([]byte, size, p.defaultSize) + return &buf + } b := *buf clear(b[:cap(b)]) *buf = b[:size] @@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) { func newSizedBufferPool(size int) *sizedBufferPool { return &sizedBufferPool{ - pool: sync.Pool{ - New: func() any { - buf := make([]byte, size) - return &buf - }, - }, defaultSize: size, } } @@ -160,6 +161,7 @@ type simpleBufferPool struct { func (p *simpleBufferPool) Get(size int) *[]byte { bs, ok := p.pool.Get().(*[]byte) if ok && cap(*bs) >= size { + clear((*bs)[:cap(*bs)]) *bs = (*bs)[:size] return bs } diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index af510d20..084fb19c 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -19,6 +19,7 @@ package mem import ( + "fmt" "io" ) @@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { // Reader returns a new Reader for the input slice after taking references to // each underlying buffer. -func (s BufferSlice) Reader() Reader { +func (s BufferSlice) Reader() *Reader { s.Ref() - return &sliceReader{ + return &Reader{ data: s, len: s.Len(), } } // Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface -// with other parts systems. It also provides an additional convenience method -// Remaining(), which returns the number of unread bytes remaining in the slice. +// with other systems. +// // Buffers will be freed as they are read. 
-type Reader interface { - io.Reader - io.ByteReader - // Close frees the underlying BufferSlice and never returns an error. Subsequent - // calls to Read will return (0, io.EOF). - Close() error - // Remaining returns the number of unread bytes remaining in the slice. - Remaining() int - // Reset frees the currently held buffer slice and starts reading from the - // provided slice. This allows reusing the reader object. - Reset(s BufferSlice) -} - -type sliceReader struct { +// +// A Reader can be constructed from a BufferSlice; alternatively the zero value +// of a Reader may be used after calling Reset on it. +type Reader struct { data BufferSlice len int // The index into data[0].ReadOnlyData(). bufferIdx int } -func (r *sliceReader) Remaining() int { +// Remaining returns the number of unread bytes remaining in the slice. +func (r *Reader) Remaining() int { return r.len } -func (r *sliceReader) Reset(s BufferSlice) { +// Reset frees the currently held buffer slice and starts reading from the +// provided slice. This allows reusing the reader object. +func (r *Reader) Reset(s BufferSlice) { r.data.Free() s.Ref() r.data = s @@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) { r.bufferIdx = 0 } -func (r *sliceReader) Close() error { +// Close frees the underlying BufferSlice and never returns an error. Subsequent +// calls to Read will return (0, io.EOF). +func (r *Reader) Close() error { r.data.Free() r.data = nil r.len = 0 return nil } -func (r *sliceReader) freeFirstBufferIfEmpty() bool { +func (r *Reader) freeFirstBufferIfEmpty() bool { if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { return false } @@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool { return true } -func (r *sliceReader) Read(buf []byte) (n int, _ error) { +func (r *Reader) Read(buf []byte) (n int, _ error) { if r.len == 0 { return 0, io.EOF } @@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) { return n, nil } -func (r *sliceReader) ReadByte() (byte, error) { +// ReadByte reads a single byte. +func (r *Reader) ReadByte() (byte, error) { if r.len == 0 { return 0, io.EOF } @@ -290,3 +287,59 @@ nextBuffer: } } } + +// Discard skips the next n bytes, returning the number of bytes discarded. +// +// It frees buffers as they are fully consumed. +// +// If Discard skips fewer than n bytes, it also returns an error. +func (r *Reader) Discard(n int) (discarded int, err error) { + total := n + for n > 0 && r.len > 0 { + curData := r.data[0].ReadOnlyData() + curSize := min(n, len(curData)-r.bufferIdx) + n -= curSize + r.len -= curSize + r.bufferIdx += curSize + if r.bufferIdx >= len(curData) { + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + } + } + discarded = total - n + if n > 0 { + return discarded, fmt.Errorf("insufficient bytes in reader") + } + return discarded, nil +} + +// Peek returns the next n bytes without advancing the reader. +// +// Peek appends results to the provided res slice and returns the updated slice. +// This pattern allows re-using the storage of res if it has sufficient +// capacity. +// +// The returned subslices are views into the underlying buffers and are only +// valid until the reader is advanced past the corresponding buffer. +// +// If Peek returns fewer than n bytes, it also returns an error. 
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) { + for i := 0; n > 0 && i < len(r.data); i++ { + curData := r.data[i].ReadOnlyData() + start := 0 + if i == 0 { + start = r.bufferIdx + } + curSize := min(n, len(curData)-start) + if curSize == 0 { + continue + } + res = append(res, curData[start:start+curSize]) + n -= curSize + } + if n > 0 { + return nil, fmt.Errorf("insufficient bytes in reader") + } + return res, nil +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index ee0ff969..1e783feb 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { } // check if the context has the relevant information to prepareMsg - if rpcInfo.preloaderInfo == nil { - return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil") - } if rpcInfo.preloaderInfo.codec == nil { return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil") } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 47ea09f5..6b04c9e8 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -657,8 +657,20 @@ type streamReader interface { Read(n int) (mem.BufferSlice, error) } +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://golang.org/issues/8005#issuecomment-190753527 +// for details. +type noCopy struct { +} + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} + // parser reads complete gRPC messages from the underlying reader. type parser struct { + _ noCopy // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. @@ -949,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR // Information about RPC type rpcInfo struct { failfast bool - preloaderInfo *compressorInfo + preloaderInfo compressorInfo } // Information about Preloader @@ -968,7 +980,7 @@ type rpcInfoContextKey struct{} func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ failfast: failfast, - preloaderInfo: &compressorInfo{ + preloaderInfo: compressorInfo{ codec: codec, cp: cp, comp: comp, diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 1da2a542..ddd37734 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -124,7 +124,8 @@ type serviceInfo struct { // Server is a gRPC server to serve RPC requests. 
type Server struct { - opts serverOptions + opts serverOptions + statsHandler stats.Handler mu sync.Mutex // guards following lis map[net.Listener]bool @@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server { o.apply(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[string]map[transport.ServerTransport]bool), - services: make(map[string]*serviceInfo), - quit: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - channelz: channelz.RegisterServer(""), + lis: make(map[net.Listener]bool), + opts: opts, + statsHandler: istats.NewCombinedHandler(opts.statsHandlers...), + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandlers: s.opts.statsHandlers, + StatsHandler: s.statsHandler, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { ctx = transport.SetConnection(ctx, rawConn) ctx = peer.NewContext(ctx, st.Peer()) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + if s.statsHandler != nil { + ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{ RemoteAddr: st.Peer().Addr, LocalAddr: st.Peer().LocalAddr, }) - sh.HandleConn(ctx, &stats.ConnBegin{}) + s.statsHandler.HandleConn(ctx, &stats.ConnBegin{}) } defer func() { st.Close(errors.New("finished serving streams for the server transport")) - for _, sh := range s.opts.statsHandlers { - sh.HandleConn(ctx, &stats.ConnEnd{}) + if s.statsHandler != nil { + s.statsHandler.HandleConn(ctx, &stats.ConnEnd{}) } }() @@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = stream.Write(hdr, payload, opts) - if err == nil { - if len(s.opts.statsHandlers) != 0 { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) - } - } + if err == nil && s.statsHandler != nil { + s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) } return err } @@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - shs := s.opts.statsHandlers - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + sh := s.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - for _, sh := range shs { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: false, IsServerStream: false, } @@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt trInfo.tr.Finish() } - for _, sh := range shs { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } var payInfo *payloadInfo - if len(shs) != 0 || len(binlogs) != 0 { + if sh != nil || len(binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - for _, sh := range shs { + if sh != nil { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if channelz.IsOn() { s.incrCallsStarted() } - shs := s.opts.statsHandlers + sh := s.statsHandler var statsBegin *stats.Begin - if len(shs) != 0 { - beginTime := time.Now() + if sh != nil { statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: time.Now(), IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - for _, sh := range shs { - sh.HandleRPC(ctx, statsBegin) - } + sh.HandleRPC(ctx, statsBegin) } ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, s: stream, - p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + p: parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), desc: sd, maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: shs, + statsHandler: sh, } - if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if sh != nil || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. 
defer func() { if trInfo != nil { @@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv ss.mu.Unlock() } - if len(shs) != 0 { + if sh != nil { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - for _, sh := range shs { - sh.HandleRPC(ctx, end) - } + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser method := sm[pos+1:] // FromIncomingContext is expensive: skip if there are no statsHandlers - if len(s.opts.statsHandlers) > 0 { + if s.statsHandler != nil { md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) - } + ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + s.statsHandler.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d9bbd4c5..ca87ff97 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cc.NewStream(ctx, desc, method, opts...) } +var emptyMethodConfig = serviceconfig.MethodConfig{} + func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { // Start tracking the RPC for idleness purposes. This is where a stream is // created for both streaming and unary RPCs, and hence is a good place to @@ -217,7 +219,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return nil, err } - var mc serviceconfig.MethodConfig + mc := &emptyMethodConfig var onCommit func() newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...) 
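Both the server-side hunks above and the csAttempt hunks that follow replace per-call loops over []stats.Handler with a single handler built once by istats.NewCombinedHandler(opts.statsHandlers...). That constructor is internal to grpc-go and its implementation is not part of this diff; the sketch below is only a rough illustration of what such a fan-out handler can look like (the combined type, newCombined, the package name, and the nil-on-empty behavior are assumptions for illustration):

package grpcstats

import (
	"context"

	"google.golang.org/grpc/stats"
)

// combined fans every callback out to each registered handler, so call sites
// can hold a single nil-checkable stats.Handler instead of ranging over a
// slice on every event.
type combined struct{ handlers []stats.Handler }

var _ stats.Handler = (*combined)(nil)

// newCombined returns nil for zero handlers and the handler itself for one,
// keeping the common cases as cheap as they were before.
func newCombined(hs ...stats.Handler) stats.Handler {
	switch len(hs) {
	case 0:
		return nil
	case 1:
		return hs[0]
	default:
		return &combined{handlers: hs}
	}
}

func (c *combined) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	for _, h := range c.handlers {
		ctx = h.TagRPC(ctx, info)
	}
	return ctx
}

func (c *combined) HandleRPC(ctx context.Context, s stats.RPCStats) {
	for _, h := range c.handlers {
		h.HandleRPC(ctx, s)
	}
}

func (c *combined) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	for _, h := range c.handlers {
		ctx = h.TagConn(ctx, info)
	}
	return ctx
}

func (c *combined) HandleConn(ctx context.Context, s stats.ConnStats) {
	for _, h := range c.handlers {
		h.HandleConn(ctx, s)
	}
}

With that shape, the hot-path checks in server.go and stream.go collapse from a length check plus a range loop into a single nil check followed by one HandleRPC call, which is what most of the surrounding hunks are doing.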
@@ -240,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth if rpcConfig.Context != nil { ctx = rpcConfig.Context } - mc = rpcConfig.MethodConfig + mc = &rpcConfig.MethodConfig onCommit = rpcConfig.OnCommitted if rpcConfig.Interceptor != nil { rpcInfo.Context = nil @@ -258,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return newStream(ctx, func() {}) } -func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) { callInfo := defaultCallInfo() if mc.WaitForReady != nil { callInfo.failFast = !*mc.WaitForReady @@ -325,7 +327,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs := &clientStream{ callHdr: callHdr, ctx: ctx, - methodConfig: &mc, + methodConfig: mc, opts: opts, callInfo: callInfo, cc: cc, @@ -418,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time - shs := cs.cc.dopts.copts.StatsHandlers - for _, sh := range shs { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay}) + sh := cs.cc.statsHandler + if sh != nil { beginTime = time.Now() - begin := &stats.Begin{ + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{ + FullMethodName: method, FailFast: cs.callInfo.failFast, + NameResolutionDelay: cs.nameResolutionDelay, + }) + sh.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: beginTime, FailFast: cs.callInfo.failFast, IsClientStream: cs.desc.ClientStreams, IsServerStream: cs.desc.ServerStreams, IsTransparentRetryAttempt: isTransparent, - } - sh.HandleRPC(ctx, begin) + }) } var trInfo *traceInfo @@ -461,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) beginTime: beginTime, cs: cs, decompressorV0: cs.cc.dopts.dc, - statsHandlers: shs, + statsHandler: sh, trInfo: trInfo, }, nil } @@ -482,10 +486,8 @@ func (a *csAttempt) getTransport() error { if a.trInfo != nil { a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } - if pick.blocked { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) - } + if pick.blocked && a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{}) } return nil } @@ -529,7 +531,7 @@ func (a *csAttempt) newStream() error { } a.transportStream = s a.ctx = s.Context() - a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -549,6 +551,8 @@ type clientStream struct { sentLast bool // sent an end stream + receivedFirstMsg bool // set after the first message is received + methodConfig *MethodConfig ctx context.Context // the application's context, wrapped by stats/tracing @@ -599,7 +603,7 @@ type csAttempt struct { cs *clientStream transport transport.ClientTransport transportStream *transport.ClientStream - parser *parser + 
parser parser pickResult balancer.PickResult finished bool @@ -613,8 +617,8 @@ type csAttempt struct { // and cleared when the finish method is called. trInfo *traceInfo - statsHandlers []stats.Handler - beginTime time.Time + statsHandler stats.Handler + beginTime time.Time // set for newStream errors that may be transparently retried allowTransparentRetry bool @@ -1108,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } return io.EOF } - if len(a.statsHandlers) != 0 { - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) - } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } return nil } func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs - if len(a.statsHandlers) != 0 && payInfo == nil { + if a.statsHandler != nil && payInfo == nil { payInfo = &payloadInfo{} defer payInfo.free() } @@ -1139,16 +1141,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompressorSet = true } - if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { + if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } + // Received no msg and status OK for non-server streaming rpcs. + if !cs.desc.ServerStreams && !cs.receivedFirstMsg { + return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC") + } return io.EOF // indicates successful end of stream. } return toRPCErr(err) } + cs.receivedFirstMsg = true if a.trInfo != nil { a.mu.Lock() if a.trInfo.tr != nil { @@ -1156,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, &stats.InPayload{ + if a.statsHandler != nil { + a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1172,12 +1179,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (a *csAttempt) finish(err error) {
@@ -1210,15 +1217,14 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
- for _, sh := range a.statsHandlers {
- end := &stats.End{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.End{
Client: true,
BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
- }
- sh.HandleRPC(a.ctx, end)
+ })
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1324,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1359,6 +1365,7 @@ type addrConnStream struct {
transport transport.ClientTransport
ctx context.Context
sentLast bool
+ receivedFirstMsg bool
desc *StreamDesc
codec baseCodec
sendCompressorV0 Compressor
@@ -1366,7 +1373,7 @@ type addrConnStream struct {
decompressorSet bool
decompressorV0 Decompressor
decompressorV1 encoding.Compressor
- parser *parser
+ parser parser
// mu guards finished and is held for the entire finish method.
mu sync.Mutex
@@ -1479,15 +1486,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompressorSet = true
}
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+ // Received no msg and status OK for non-server streaming rpcs.
+ if !as.desc.ServerStreams && !as.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ as.receivedFirstMsg = true
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1496,12 +1508,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (as *addrConnStream) finish(err error) {
@@ -1584,7 +1596,7 @@ type ServerStream interface {
type serverStream struct {
ctx context.Context
s *transport.ServerStream
- p *parser
+ p parser
codec baseCodec
desc *StreamDesc
@@ -1601,7 +1613,7 @@ type serverStream struct {
maxSendMessageSize int
trInfo *traceInfo
- statsHandler []stats.Handler
+ statsHandler stats.Handler
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1737,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
binlog.Log(ss.ctx, sm)
}
}
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
return nil
}
@@ -1771,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
}()
var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+ if ss.statsHandler != nil || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1795,16 +1805,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return toRPCErr(err)
}
ss.recvFirstMsg = true
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
@@ -1821,7 +1829,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
// Special handling for non-client-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { + if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF { return nil } else if err != nil { return err diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 468f1106..9e6d018f 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.75.1" +const Version = "1.77.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index f089c7cb..df697931 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -585,8 +585,8 @@ go.opentelemetry.io/contrib/bridges/prometheus # go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 ## explicit; go 1.23.0 go.opentelemetry.io/contrib/exporters/autoexport -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 +## explicit; go 1.24.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal # go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.60.0 @@ -594,12 +594,11 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/inte go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 +## explicit; go 1.24.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil # go.opentelemetry.io/contrib/propagators/jaeger v1.35.0 ## explicit; go 1.22.0 go.opentelemetry.io/contrib/propagators/jaeger @@ -607,11 +606,12 @@ go.opentelemetry.io/contrib/propagators/jaeger ## explicit; go 1.23.0 go.opentelemetry.io/contrib/samplers/jaegerremote go.opentelemetry.io/contrib/samplers/jaegerremote/internal/utils -# go.opentelemetry.io/otel v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/attribute/internal +go.opentelemetry.io/otel/attribute/internal/xxhash go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal/baggage @@ -622,9 +622,10 @@ go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 -go.opentelemetry.io/otel/semconv/v1.30.0 -go.opentelemetry.io/otel/semconv/v1.34.0 go.opentelemetry.io/otel/semconv/v1.37.0 +go.opentelemetry.io/otel/semconv/v1.37.0/httpconv +go.opentelemetry.io/otel/semconv/v1.37.0/otelconv +go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv go.opentelemetry.io/otel/semconv/v1.7.0 # 
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 ## explicit; go 1.19 @@ -694,33 +695,36 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded go.opentelemetry.io/otel/log/noop -# go.opentelemetry.io/otel/metric v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/metric v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.37.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/sdk v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation -go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +go.opentelemetry.io/otel/sdk/trace/internal/env +go.opentelemetry.io/otel/sdk/trace/internal/observ go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/sdk/log v0.12.2 ## explicit; go 1.23.0 go.opentelemetry.io/otel/sdk/log -# go.opentelemetry.io/otel/sdk/metric v1.37.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/sdk/metric v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/exemplar go.opentelemetry.io/otel/sdk/metric/internal go.opentelemetry.io/otel/sdk/metric/internal/aggregate +go.opentelemetry.io/otel/sdk/metric/internal/observ +go.opentelemetry.io/otel/sdk/metric/internal/reservoir go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.38.0 -## explicit; go 1.23.0 +# go.opentelemetry.io/otel/trace v1.39.0 +## explicit; go 1.24.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry @@ -764,14 +768,14 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.43.0 +# golang.org/x/crypto v0.44.0 ## explicit; go 1.24.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish # golang.org/x/mod v0.29.0 ## explicit; go 1.24.0 golang.org/x/mod/semver -# golang.org/x/net v0.46.0 +# golang.org/x/net v0.47.0 ## explicit; go 1.24.0 golang.org/x/net/bpf golang.org/x/net/html @@ -792,25 +796,25 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.31.0 +# golang.org/x/oauth2 v0.32.0 ## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sync v0.17.0 +# golang.org/x/sync v0.18.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.37.0 +# golang.org/x/sys v0.39.0 ## explicit; go 1.24.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.36.0 +# golang.org/x/term v0.37.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.30.0 +# golang.org/x/text v0.31.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding @@ -863,15 +867,15 @@ golang.org/x/tools/internal/versions # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 +## explicit; go 1.24.0 
google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 -## explicit; go 1.23.0 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 +## explicit; go 1.24.0 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.75.1 -## explicit; go 1.23.0 +# google.golang.org/grpc v1.77.0 +## explicit; go 1.24.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -881,7 +885,6 @@ google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/pickfirst/internal -google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -891,6 +894,7 @@ google.golang.org/grpc/credentials google.golang.org/grpc/credentials/insecure google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip +google.golang.org/grpc/encoding/internal google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog