From 3b3fd31558c5f2c22947ecd2a33faba6f2bcd155 Mon Sep 17 00:00:00 2001 From: Kevin Lin Date: Tue, 10 Dec 2024 10:40:07 -0800 Subject: [PATCH] Upgrade to github.com/DataDog/datadog-go/v5 --- go.mod | 4 +- go.sum | 35 +- pkg/smokescreen/metrics/statsd_metrics.go | 5 +- .../github.com/DataDog/datadog-go/LICENSE.txt | 19 - .../DataDog/datadog-go/statsd/README.md | 4 - .../DataDog/datadog-go/statsd/aggregator.go | 271 -------- .../DataDog/datadog-go/statsd/buffer.go | 191 ----- .../DataDog/datadog-go/statsd/buffer_pool.go | 40 -- .../DataDog/datadog-go/statsd/event.go | 91 --- .../DataDog/datadog-go/statsd/fnv1a.go | 39 -- .../DataDog/datadog-go/statsd/format.go | 257 ------- .../DataDog/datadog-go/statsd/metrics.go | 181 ----- .../DataDog/datadog-go/statsd/noop.go | 91 --- .../DataDog/datadog-go/statsd/options.go | 317 --------- .../DataDog/datadog-go/statsd/pipe.go | 9 - .../DataDog/datadog-go/statsd/pipe_windows.go | 84 --- .../DataDog/datadog-go/statsd/sender.go | 130 ---- .../datadog-go/statsd/service_check.go | 70 -- .../DataDog/datadog-go/statsd/statsd.go | 657 ------------------ .../DataDog/datadog-go/statsd/telemetry.go | 151 ---- .../DataDog/datadog-go/statsd/udp.go | 40 -- .../DataDog/datadog-go/statsd/uds.go | 100 --- .../DataDog/datadog-go/statsd/uds_windows.go | 10 - .../DataDog/datadog-go/statsd/worker.go | 163 ----- vendor/modules.txt | 8 +- 25 files changed, 25 insertions(+), 2942 deletions(-) delete mode 100644 vendor/github.com/DataDog/datadog-go/LICENSE.txt delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/README.md delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/aggregator.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/buffer.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/buffer_pool.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/event.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/fnv1a.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/format.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/metrics.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/noop.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/options.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/pipe.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/pipe_windows.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/sender.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/service_check.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/statsd.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/telemetry.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/udp.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/uds.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/uds_windows.go delete mode 100644 vendor/github.com/DataDog/datadog-go/statsd/worker.go diff --git a/go.mod b/go.mod index 3451d220..123844dc 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/stripe/smokescreen go 1.18 require ( - github.com/DataDog/datadog-go v4.5.1+incompatible + github.com/DataDog/datadog-go/v5 v5.6.0 github.com/armon/go-proxyproto v0.0.0-20170620220930-48572f11356f github.com/carlmjohnson/versioninfo v0.22.4 github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 @@ -11,7 +11,7 @@ require ( github.com/prometheus/client_golang v1.13.0 github.com/rs/xid v1.2.1 github.com/sirupsen/logrus v1.9.0 - 
github.com/stretchr/testify v1.8.0 + github.com/stretchr/testify v1.8.1 github.com/stripe/goproxy v0.0.0-20241022131412-58117846327a golang.org/x/net v0.17.0 gopkg.in/urfave/cli.v1 v1.20.0 diff --git a/go.sum b/go.sum index d57db321..e6e409d9 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,9 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v4.5.1+incompatible h1:LPLdXNXy4LPcHYxvalEyfRt45OmbIaOy4WaSFOL4veg= -github.com/DataDog/datadog-go v4.5.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= +github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -90,6 +91,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -203,38 +205,28 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stripe/goproxy v0.0.0-20231206175114-560c3ba6a2a1 h1:kA8wVCrTI7UE2Z8oj24W75/J+IUA/fFn8vYYXs/sJeE= -github.com/stripe/goproxy v0.0.0-20231206175114-560c3ba6a2a1/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20240702223215-529f11a6f861 h1:dlR0X8/38L9ip1ydDazfTRyPe0iW6cepmIcaygH2r5Q= -github.com/stripe/goproxy v0.0.0-20240702223215-529f11a6f861/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20240702232545-72d7dbc6d4fe h1:RzjmXDVOjWq9sPRc/rn6a9g1N3C+q9LzP8pI7aPEzPQ= -github.com/stripe/goproxy v0.0.0-20240702232545-72d7dbc6d4fe/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20240711170433-75b93c00dfb0 h1:1xKgjoLWAosf0yoKocNN4mel1++AKqJw9axpJyz1JRw= -github.com/stripe/goproxy v0.0.0-20240711170433-75b93c00dfb0/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20240909161309-906e92b723dd h1:+GFSUmU0/RS984Zp8KvYxjU95DxVIGJflsOfITHkF68= -github.com/stripe/goproxy v0.0.0-20240909161309-906e92b723dd/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20241017101008-e12ef0653f22 h1:tyY7UmUjUv1FF6/bv7P9hgRoNJEuKC2wktIm7e+SHeI= -github.com/stripe/goproxy v0.0.0-20241017101008-e12ef0653f22/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= -github.com/stripe/goproxy v0.0.0-20241022121110-37ebb5c1cae0 h1:4tKku4LktTP2RI8+7f0hV4nlXB8hQK+Bddmnj3oBI5Q= -github.com/stripe/goproxy v0.0.0-20241022121110-37ebb5c1cae0/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stripe/goproxy v0.0.0-20241022131412-58117846327a h1:7i7jyPHkHbyyqgyoE9dfmP4JtysKB42EemnBRjNofX0= github.com/stripe/goproxy v0.0.0-20241022131412-58117846327a/go.mod h1:hF2CVgH4++5ijZiy9grGVP8Fsi4u+SMOtbnIKYbMUjY= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -276,6 +268,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -306,6 +299,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -327,6 +321,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -341,6 +336,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -361,7 +357,9 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -423,6 +421,7 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/smokescreen/metrics/statsd_metrics.go b/pkg/smokescreen/metrics/statsd_metrics.go index 6f74479b..692053b0 100644 --- a/pkg/smokescreen/metrics/statsd_metrics.go +++ b/pkg/smokescreen/metrics/statsd_metrics.go @@ -2,7 +2,7 @@ package metrics import ( "fmt" - "github.com/DataDog/datadog-go/statsd" + "github.com/DataDog/datadog-go/v5/statsd" "sync/atomic" "time" ) @@ -20,11 +20,10 @@ type StatsdMetricsClient struct { // NewMetricsClient creates a new StatsdMetricsClient with the provided statsd address and // namespace. func NewStatsdMetricsClient(addr, namespace string) (*StatsdMetricsClient, error) { - c, err := statsd.New(addr) + c, err := statsd.New(addr, statsd.WithNamespace(namespace)) if err != nil { return nil, err } - c.Namespace = namespace // Populate the client's map to hold metric tags metricsTags := make(map[string][]string) diff --git a/vendor/github.com/DataDog/datadog-go/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/LICENSE.txt deleted file mode 100644 index 97cd06d7..00000000 --- a/vendor/github.com/DataDog/datadog-go/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Datadog, Inc - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/DataDog/datadog-go/statsd/README.md b/vendor/github.com/DataDog/datadog-go/statsd/README.md deleted file mode 100644 index 2fc89968..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## Overview - -Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. 
Dogstatsd extends Statsd, adding tags -and histograms. diff --git a/vendor/github.com/DataDog/datadog-go/statsd/aggregator.go b/vendor/github.com/DataDog/datadog-go/statsd/aggregator.go deleted file mode 100644 index 7d1d44f5..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/aggregator.go +++ /dev/null @@ -1,271 +0,0 @@ -package statsd - -import ( - "strings" - "sync" - "sync/atomic" - "time" -) - -type ( - countsMap map[string]*countMetric - gaugesMap map[string]*gaugeMetric - setsMap map[string]*setMetric - bufferedMetricMap map[string]*histogramMetric -) - -// bufferedMetricContexts represent the contexts for Histograms, Distributions -// and Timing. Since those 3 metric types behave the same way and are sampled -// with the same type they're represented by the same class. -type bufferedMetricContexts struct { - nbContext int32 - mutex sync.RWMutex - values bufferedMetricMap - newMetric func(string, float64, string) *bufferedMetric -} - -func newBufferedContexts(newMetric func(string, float64, string) *bufferedMetric) bufferedMetricContexts { - return bufferedMetricContexts{ - values: bufferedMetricMap{}, - newMetric: newMetric, - } -} - -func (bc *bufferedMetricContexts) flush(metrics []metric) []metric { - bc.mutex.Lock() - values := bc.values - bc.values = bufferedMetricMap{} - bc.mutex.Unlock() - - for _, d := range values { - metrics = append(metrics, d.flushUnsafe()) - } - atomic.AddInt32(&bc.nbContext, int32(len(values))) - return metrics -} - -func (bc *bufferedMetricContexts) sample(name string, value float64, tags []string) error { - context, stringTags := getContextAndTags(name, tags) - bc.mutex.RLock() - if v, found := bc.values[context]; found { - v.sample(value) - bc.mutex.RUnlock() - return nil - } - bc.mutex.RUnlock() - - bc.mutex.Lock() - bc.values[context] = bc.newMetric(name, value, stringTags) - bc.mutex.Unlock() - return nil -} - -func (bc *bufferedMetricContexts) resetAndGetNbContext() int32 { - return atomic.SwapInt32(&bc.nbContext, 0) -} - -type aggregator struct { - nbContextGauge int32 - nbContextCount int32 - nbContextSet int32 - - countsM sync.RWMutex - gaugesM sync.RWMutex - setsM sync.RWMutex - - gauges gaugesMap - counts countsMap - sets setsMap - histograms bufferedMetricContexts - distributions bufferedMetricContexts - timings bufferedMetricContexts - - closed chan struct{} - exited chan struct{} - - client *Client -} - -type aggregatorMetrics struct { - nbContext int32 - nbContextGauge int32 - nbContextCount int32 - nbContextSet int32 - nbContextHistogram int32 - nbContextDistribution int32 - nbContextTiming int32 -} - -func newAggregator(c *Client) *aggregator { - return &aggregator{ - client: c, - counts: countsMap{}, - gauges: gaugesMap{}, - sets: setsMap{}, - histograms: newBufferedContexts(newHistogramMetric), - distributions: newBufferedContexts(newDistributionMetric), - timings: newBufferedContexts(newTimingMetric), - closed: make(chan struct{}), - exited: make(chan struct{}), - } -} - -func (a *aggregator) start(flushInterval time.Duration) { - ticker := time.NewTicker(flushInterval) - - go func() { - for { - select { - case <-ticker.C: - a.sendMetrics() - case <-a.closed: - close(a.exited) - return - } - } - }() -} - -func (a *aggregator) sendMetrics() { - for _, m := range a.flushMetrics() { - a.client.send(m) - } -} - -func (a *aggregator) stop() { - close(a.closed) - <-a.exited - a.sendMetrics() -} - -func (a *aggregator) flushTelemetryMetrics() *aggregatorMetrics { - if a == nil { - return nil - } - - am := &aggregatorMetrics{ - 
nbContextGauge: atomic.SwapInt32(&a.nbContextGauge, 0), - nbContextCount: atomic.SwapInt32(&a.nbContextCount, 0), - nbContextSet: atomic.SwapInt32(&a.nbContextSet, 0), - nbContextHistogram: a.histograms.resetAndGetNbContext(), - nbContextDistribution: a.distributions.resetAndGetNbContext(), - nbContextTiming: a.timings.resetAndGetNbContext(), - } - - am.nbContext = am.nbContextGauge + am.nbContextCount + am.nbContextSet + am.nbContextHistogram + am.nbContextDistribution + am.nbContextTiming - return am -} - -func (a *aggregator) flushMetrics() []metric { - metrics := []metric{} - - // We reset the values to avoid sending 'zero' values for metrics not - // sampled during this flush interval - - a.setsM.Lock() - sets := a.sets - a.sets = setsMap{} - a.setsM.Unlock() - - for _, s := range sets { - metrics = append(metrics, s.flushUnsafe()...) - } - - a.gaugesM.Lock() - gauges := a.gauges - a.gauges = gaugesMap{} - a.gaugesM.Unlock() - - for _, g := range gauges { - metrics = append(metrics, g.flushUnsafe()) - } - - a.countsM.Lock() - counts := a.counts - a.counts = countsMap{} - a.countsM.Unlock() - - for _, c := range counts { - metrics = append(metrics, c.flushUnsafe()) - } - - metrics = a.histograms.flush(metrics) - metrics = a.distributions.flush(metrics) - metrics = a.timings.flush(metrics) - - atomic.AddInt32(&a.nbContextCount, int32(len(counts))) - atomic.AddInt32(&a.nbContextGauge, int32(len(gauges))) - atomic.AddInt32(&a.nbContextSet, int32(len(sets))) - return metrics -} - -func getContext(name string, tags []string) string { - return name + ":" + strings.Join(tags, tagSeparatorSymbol) -} - -func getContextAndTags(name string, tags []string) (string, string) { - stringTags := strings.Join(tags, tagSeparatorSymbol) - return name + ":" + stringTags, stringTags -} - -func (a *aggregator) count(name string, value int64, tags []string) error { - context := getContext(name, tags) - a.countsM.RLock() - if count, found := a.counts[context]; found { - count.sample(value) - a.countsM.RUnlock() - return nil - } - a.countsM.RUnlock() - - a.countsM.Lock() - a.counts[context] = newCountMetric(name, value, tags) - a.countsM.Unlock() - return nil -} - -func (a *aggregator) gauge(name string, value float64, tags []string) error { - context := getContext(name, tags) - a.gaugesM.RLock() - if gauge, found := a.gauges[context]; found { - gauge.sample(value) - a.gaugesM.RUnlock() - return nil - } - a.gaugesM.RUnlock() - - gauge := newGaugeMetric(name, value, tags) - - a.gaugesM.Lock() - a.gauges[context] = gauge - a.gaugesM.Unlock() - return nil -} - -func (a *aggregator) set(name string, value string, tags []string) error { - context := getContext(name, tags) - a.setsM.RLock() - if set, found := a.sets[context]; found { - set.sample(value) - a.setsM.RUnlock() - return nil - } - a.setsM.RUnlock() - - a.setsM.Lock() - a.sets[context] = newSetMetric(name, value, tags) - a.setsM.Unlock() - return nil -} - -func (a *aggregator) histogram(name string, value float64, tags []string) error { - return a.histograms.sample(name, value, tags) -} - -func (a *aggregator) distribution(name string, value float64, tags []string) error { - return a.distributions.sample(name, value, tags) -} - -func (a *aggregator) timing(name string, value float64, tags []string) error { - return a.timings.sample(name, value, tags) -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/buffer.go b/vendor/github.com/DataDog/datadog-go/statsd/buffer.go deleted file mode 100644 index 37ea6ac2..00000000 --- 
a/vendor/github.com/DataDog/datadog-go/statsd/buffer.go +++ /dev/null @@ -1,191 +0,0 @@ -package statsd - -import ( - "strconv" -) - -type bufferFullError string - -func (e bufferFullError) Error() string { return string(e) } - -const errBufferFull = bufferFullError("statsd buffer is full") - -type partialWriteError string - -func (e partialWriteError) Error() string { return string(e) } - -const errPartialWrite = partialWriteError("value partially written") - -const metricOverhead = 512 - -// statsdBuffer is a buffer containing statsd messages -// this struct methods are NOT safe for concurent use -type statsdBuffer struct { - buffer []byte - maxSize int - maxElements int - elementCount int -} - -func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer { - return &statsdBuffer{ - buffer: make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on it's own if an element does not fit - maxSize: maxSize, - maxElements: maxElements, - } -} - -func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -// writeAggregated serialized as many values as possible in the current buffer and return the position in values where it stopped. -func (b *statsdBuffer) writeAggregated(metricSymbol []byte, namespace string, globalTags []string, name string, values []float64, tags string, tagSize int) (int, error) { - if b.elementCount >= b.maxElements { - return 0, errBufferFull - } - - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendHeader(b.buffer, namespace, name) - - // buffer already full - if len(b.buffer)+tagSize > b.maxSize { - b.buffer = originalBuffer - return 0, errBufferFull - } - - // We add as many value as possible - var position int - for idx, v := range values { - previousBuffer := b.buffer - if idx != 0 { - b.buffer = append(b.buffer, ':') - } - b.buffer = strconv.AppendFloat(b.buffer, v, 'f', -1, 64) - - // Should we stop serializing and switch to another buffer - if len(b.buffer)+tagSize > b.maxSize { - b.buffer = previousBuffer - break - } - position = idx + 1 - } - - // we could not add a single value - if position == 0 { - b.buffer = originalBuffer - return 0, errBufferFull - } - - b.buffer = append(b.buffer, '|') - b.buffer = append(b.buffer, metricSymbol...) 
- b.buffer = appendTagsAggregated(b.buffer, globalTags, tags) - b.elementCount++ - - if position != len(values) { - return position, errPartialWrite - } - return position, nil - -} - -func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeEvent(event Event, globalTags []string) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendEvent(b.buffer, event, globalTags) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) writeServiceCheck(serviceCheck ServiceCheck, globalTags []string) error { - if b.elementCount >= b.maxElements { - return errBufferFull - } - originalBuffer := b.buffer - b.writeSeparator() - b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags) - return b.validateNewElement(originalBuffer) -} - -func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error { - if len(b.buffer) > b.maxSize { - b.buffer = originalBuffer - return errBufferFull - } - b.elementCount++ - return nil -} - -func (b *statsdBuffer) writeSeparator() { - if b.elementCount != 0 { - b.buffer = append(b.buffer, '\n') - } -} - -func (b *statsdBuffer) reset() { - b.buffer = b.buffer[:0] - b.elementCount = 0 -} - -func (b *statsdBuffer) bytes() []byte { - return b.buffer -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/buffer_pool.go b/vendor/github.com/DataDog/datadog-go/statsd/buffer_pool.go deleted file mode 100644 index 7a3e3c9d..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/buffer_pool.go +++ /dev/null @@ -1,40 +0,0 @@ -package statsd - -type bufferPool struct { - pool chan *statsdBuffer - bufferMaxSize int - bufferMaxElements int -} - -func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool { - p := &bufferPool{ - pool: make(chan *statsdBuffer, poolSize), - bufferMaxSize: bufferMaxSize, - bufferMaxElements: bufferMaxElements, - } - for i := 0; i < poolSize; i++ { - p.addNewBuffer() - } - return p -} - -func (p *bufferPool) addNewBuffer() { - p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) -} - -func (p *bufferPool) borrowBuffer() *statsdBuffer { - select { - case b := <-p.pool: - return b - default: - return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) - } -} - -func (p *bufferPool) returnBuffer(buffer *statsdBuffer) { - buffer.reset() - select { - case p.pool <- buffer: - 
default: - } -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/event.go b/vendor/github.com/DataDog/datadog-go/statsd/event.go deleted file mode 100644 index cf966d1a..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/event.go +++ /dev/null @@ -1,91 +0,0 @@ -package statsd - -import ( - "fmt" - "time" -) - -// Events support -// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 -// The reason why they got exported is so that client code can directly use the types. - -// EventAlertType is the alert type for events -type EventAlertType string - -const ( - // Info is the "info" AlertType for events - Info EventAlertType = "info" - // Error is the "error" AlertType for events - Error EventAlertType = "error" - // Warning is the "warning" AlertType for events - Warning EventAlertType = "warning" - // Success is the "success" AlertType for events - Success EventAlertType = "success" -) - -// EventPriority is the event priority for events -type EventPriority string - -const ( - // Normal is the "normal" Priority for events - Normal EventPriority = "normal" - // Low is the "low" Priority for events - Low EventPriority = "low" -) - -// An Event is an object that can be posted to your DataDog event stream. -type Event struct { - // Title of the event. Required. - Title string - // Text is the description of the event. Required. - Text string - // Timestamp is a timestamp for the event. If not provided, the dogstatsd - // server will set this to the current time. - Timestamp time.Time - // Hostname for the event. - Hostname string - // AggregationKey groups this event with others of the same key. - AggregationKey string - // Priority of the event. Can be statsd.Low or statsd.Normal. - Priority EventPriority - // SourceTypeName is a source type for the event. - SourceTypeName string - // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. - // If absent, the default value applied by the dogstatsd server is Info. - AlertType EventAlertType - // Tags for the event. - Tags []string -} - -// NewEvent creates a new event with the given title and text. Error checking -// against these values is done at send-time, or upon running e.Check. -func NewEvent(title, text string) *Event { - return &Event{ - Title: title, - Text: text, - } -} - -// Check verifies that an event is valid. -func (e Event) Check() error { - if len(e.Title) == 0 { - return fmt.Errorf("statsd.Event title is required") - } - if len(e.Text) == 0 { - return fmt.Errorf("statsd.Event text is required") - } - return nil -} - -// Encode returns the dogstatsd wire protocol representation for an event. -// Tags may be passed which will be added to the encoded output but not to -// the Event's list of tags, eg. for default tags. -func (e Event) Encode(tags ...string) (string, error) { - err := e.Check() - if err != nil { - return "", err - } - var buffer []byte - buffer = appendEvent(buffer, e, tags) - return string(buffer), nil -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/fnv1a.go b/vendor/github.com/DataDog/datadog-go/statsd/fnv1a.go deleted file mode 100644 index 03dc8a07..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/fnv1a.go +++ /dev/null @@ -1,39 +0,0 @@ -package statsd - -const ( - // FNV-1a - offset32 = uint32(2166136261) - prime32 = uint32(16777619) - - // init32 is what 32 bits hash values should be initialized with. 
- init32 = offset32 -) - -// HashString32 returns the hash of s. -func hashString32(s string) uint32 { - return addString32(init32, s) -} - -// AddString32 adds the hash of s to the precomputed hash value h. -func addString32(h uint32, s string) uint32 { - i := 0 - n := (len(s) / 8) * 8 - - for i != n { - h = (h ^ uint32(s[i])) * prime32 - h = (h ^ uint32(s[i+1])) * prime32 - h = (h ^ uint32(s[i+2])) * prime32 - h = (h ^ uint32(s[i+3])) * prime32 - h = (h ^ uint32(s[i+4])) * prime32 - h = (h ^ uint32(s[i+5])) * prime32 - h = (h ^ uint32(s[i+6])) * prime32 - h = (h ^ uint32(s[i+7])) * prime32 - i += 8 - } - - for _, c := range s[i:] { - h = (h ^ uint32(c)) * prime32 - } - - return h -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/format.go b/vendor/github.com/DataDog/datadog-go/statsd/format.go deleted file mode 100644 index 8d62aa7b..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/format.go +++ /dev/null @@ -1,257 +0,0 @@ -package statsd - -import ( - "strconv" - "strings" -) - -var ( - gaugeSymbol = []byte("g") - countSymbol = []byte("c") - histogramSymbol = []byte("h") - distributionSymbol = []byte("d") - setSymbol = []byte("s") - timingSymbol = []byte("ms") - tagSeparatorSymbol = "," -) - -func appendHeader(buffer []byte, namespace string, name string) []byte { - if namespace != "" { - buffer = append(buffer, namespace...) - } - buffer = append(buffer, name...) - buffer = append(buffer, ':') - return buffer -} - -func appendRate(buffer []byte, rate float64) []byte { - if rate < 1 { - buffer = append(buffer, "|@"...) - buffer = strconv.AppendFloat(buffer, rate, 'f', -1, 64) - } - return buffer -} - -func appendWithoutNewlines(buffer []byte, s string) []byte { - // fastpath for strings without newlines - if strings.IndexByte(s, '\n') == -1 { - return append(buffer, s...) - } - - for _, b := range []byte(s) { - if b != '\n' { - buffer = append(buffer, b) - } - } - return buffer -} - -func appendTags(buffer []byte, globalTags []string, tags []string) []byte { - if len(globalTags) == 0 && len(tags) == 0 { - return buffer - } - buffer = append(buffer, "|#"...) - firstTag := true - - for _, tag := range globalTags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - for _, tag := range tags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - return buffer -} - -func appendTagsAggregated(buffer []byte, globalTags []string, tags string) []byte { - if len(globalTags) == 0 && tags == "" { - return buffer - } - - buffer = append(buffer, "|#"...) - firstTag := true - - for _, tag := range globalTags { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tag) - firstTag = false - } - if tags != "" { - if !firstTag { - buffer = append(buffer, tagSeparatorSymbol...) - } - buffer = appendWithoutNewlines(buffer, tags) - } - return buffer -} - -func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) 
- buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - return buffer -} - -func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = strconv.AppendInt(buffer, value, 10) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) - buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - return buffer -} - -func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte { - buffer = appendHeader(buffer, namespace, name) - buffer = append(buffer, value...) - buffer = append(buffer, '|') - buffer = append(buffer, typeSymbol...) - buffer = appendRate(buffer, rate) - buffer = appendTags(buffer, globalTags, tags) - return buffer -} - -func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte { - return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate) -} - -func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1) -} - -func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte { - return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate) -} - -func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte { - return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6) -} - -func escapedEventTextLen(text string) int { - return len(text) + strings.Count(text, "\n") -} - -func appendEscapedEventText(buffer []byte, text string) []byte { - for _, b := range []byte(text) { - if b != '\n' { - buffer = append(buffer, b) - } else { - buffer = append(buffer, "\\n"...) - } - } - return buffer -} - -func appendEvent(buffer []byte, event Event, globalTags []string) []byte { - escapedTextLen := escapedEventTextLen(event.Text) - - buffer = append(buffer, "_e{"...) - buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10) - buffer = append(buffer, tagSeparatorSymbol...) - buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10) - buffer = append(buffer, "}:"...) - buffer = append(buffer, event.Title...) - buffer = append(buffer, '|') - if escapedTextLen != len(event.Text) { - buffer = appendEscapedEventText(buffer, event.Text) - } else { - buffer = append(buffer, event.Text...) - } - - if !event.Timestamp.IsZero() { - buffer = append(buffer, "|d:"...) 
- buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10) - } - - if len(event.Hostname) != 0 { - buffer = append(buffer, "|h:"...) - buffer = append(buffer, event.Hostname...) - } - - if len(event.AggregationKey) != 0 { - buffer = append(buffer, "|k:"...) - buffer = append(buffer, event.AggregationKey...) - } - - if len(event.Priority) != 0 { - buffer = append(buffer, "|p:"...) - buffer = append(buffer, event.Priority...) - } - - if len(event.SourceTypeName) != 0 { - buffer = append(buffer, "|s:"...) - buffer = append(buffer, event.SourceTypeName...) - } - - if len(event.AlertType) != 0 { - buffer = append(buffer, "|t:"...) - buffer = append(buffer, string(event.AlertType)...) - } - - buffer = appendTags(buffer, globalTags, event.Tags) - return buffer -} - -func appendEscapedServiceCheckText(buffer []byte, text string) []byte { - for i := 0; i < len(text); i++ { - if text[i] == '\n' { - buffer = append(buffer, "\\n"...) - } else if text[i] == 'm' && i+1 < len(text) && text[i+1] == ':' { - buffer = append(buffer, "m\\:"...) - i++ - } else { - buffer = append(buffer, text[i]) - } - } - return buffer -} - -func appendServiceCheck(buffer []byte, serviceCheck ServiceCheck, globalTags []string) []byte { - buffer = append(buffer, "_sc|"...) - buffer = append(buffer, serviceCheck.Name...) - buffer = append(buffer, '|') - buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10) - - if !serviceCheck.Timestamp.IsZero() { - buffer = append(buffer, "|d:"...) - buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10) - } - - if len(serviceCheck.Hostname) != 0 { - buffer = append(buffer, "|h:"...) - buffer = append(buffer, serviceCheck.Hostname...) - } - - buffer = appendTags(buffer, globalTags, serviceCheck.Tags) - - if len(serviceCheck.Message) != 0 { - buffer = append(buffer, "|m:"...) 
- buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message) - } - return buffer -} - -func appendSeparator(buffer []byte) []byte { - return append(buffer, '\n') -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/metrics.go b/vendor/github.com/DataDog/datadog-go/statsd/metrics.go deleted file mode 100644 index 99ed4da5..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/metrics.go +++ /dev/null @@ -1,181 +0,0 @@ -package statsd - -import ( - "math" - "sync" - "sync/atomic" -) - -/* -Those are metrics type that can be aggregated on the client side: - - Gauge - - Count - - Set -*/ - -type countMetric struct { - value int64 - name string - tags []string -} - -func newCountMetric(name string, value int64, tags []string) *countMetric { - return &countMetric{ - value: value, - name: name, - tags: tags, - } -} - -func (c *countMetric) sample(v int64) { - atomic.AddInt64(&c.value, v) -} - -func (c *countMetric) flushUnsafe() metric { - return metric{ - metricType: count, - name: c.name, - tags: c.tags, - rate: 1, - ivalue: c.value, - } -} - -// Gauge - -type gaugeMetric struct { - value uint64 - name string - tags []string -} - -func newGaugeMetric(name string, value float64, tags []string) *gaugeMetric { - return &gaugeMetric{ - value: math.Float64bits(value), - name: name, - tags: tags, - } -} - -func (g *gaugeMetric) sample(v float64) { - atomic.StoreUint64(&g.value, math.Float64bits(v)) -} - -func (g *gaugeMetric) flushUnsafe() metric { - return metric{ - metricType: gauge, - name: g.name, - tags: g.tags, - rate: 1, - fvalue: math.Float64frombits(g.value), - } -} - -// Set - -type setMetric struct { - data map[string]struct{} - name string - tags []string - sync.Mutex -} - -func newSetMetric(name string, value string, tags []string) *setMetric { - set := &setMetric{ - data: map[string]struct{}{}, - name: name, - tags: tags, - } - set.data[value] = struct{}{} - return set -} - -func (s *setMetric) sample(v string) { - s.Lock() - defer s.Unlock() - s.data[v] = struct{}{} -} - -// Sets are aggregated on the agent side too. We flush the keys so a set from -// multiple application can be correctly aggregated on the agent side. -func (s *setMetric) flushUnsafe() []metric { - if len(s.data) == 0 { - return nil - } - - metrics := make([]metric, len(s.data)) - i := 0 - for value := range s.data { - metrics[i] = metric{ - metricType: set, - name: s.name, - tags: s.tags, - rate: 1, - svalue: value, - } - i++ - } - return metrics -} - -// Histograms, Distributions and Timings - -type bufferedMetric struct { - sync.Mutex - - data []float64 - name string - // Histograms and Distributions store tags as one string since we need - // to compute its size multiple time when serializing. 
- tags string - mtype metricType -} - -func (s *bufferedMetric) sample(v float64) { - s.Lock() - defer s.Unlock() - s.data = append(s.data, v) -} - -func (s *bufferedMetric) flushUnsafe() metric { - return metric{ - metricType: s.mtype, - name: s.name, - stags: s.tags, - rate: 1, - fvalues: s.data, - } -} - -type histogramMetric = bufferedMetric - -func newHistogramMetric(name string, value float64, stringTags string) *histogramMetric { - return &histogramMetric{ - data: []float64{value}, - name: name, - tags: stringTags, - mtype: histogramAggregated, - } -} - -type distributionMetric = bufferedMetric - -func newDistributionMetric(name string, value float64, stringTags string) *distributionMetric { - return &distributionMetric{ - data: []float64{value}, - name: name, - tags: stringTags, - mtype: distributionAggregated, - } -} - -type timingMetric = bufferedMetric - -func newTimingMetric(name string, value float64, stringTags string) *timingMetric { - return &timingMetric{ - data: []float64{value}, - name: name, - tags: stringTags, - mtype: timingAggregated, - } -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/noop.go b/vendor/github.com/DataDog/datadog-go/statsd/noop.go deleted file mode 100644 index 01078333..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/noop.go +++ /dev/null @@ -1,91 +0,0 @@ -package statsd - -import "time" - -// NoOpClient is a statsd client that does nothing. Can be useful in testing -// situations for library users. -type NoOpClient struct{} - -// Gauge does nothing and returns nil -func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Count does nothing and returns nil -func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error { - return nil -} - -// Histogram does nothing and returns nil -func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Distribution does nothing and returns nil -func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Decr does nothing and returns nil -func (n *NoOpClient) Decr(name string, tags []string, rate float64) error { - return nil -} - -// Incr does nothing and returns nil -func (n *NoOpClient) Incr(name string, tags []string, rate float64) error { - return nil -} - -// Set does nothing and returns nil -func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error { - return nil -} - -// Timing does nothing and returns nil -func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error { - return nil -} - -// TimeInMilliseconds does nothing and returns nil -func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - return nil -} - -// Event does nothing and returns nil -func (n *NoOpClient) Event(e *Event) error { - return nil -} - -// SimpleEvent does nothing and returns nil -func (n *NoOpClient) SimpleEvent(title, text string) error { - return nil -} - -// ServiceCheck does nothing and returns nil -func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error { - return nil -} - -// SimpleServiceCheck does nothing and returns nil -func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error { - return nil -} - -// Close does nothing and returns nil -func (n *NoOpClient) Close() error { - return nil -} - -// Flush does nothing and returns nil -func (n *NoOpClient) Flush() 
error { - return nil -} - -// SetWriteTimeout does nothing and returns nil -func (n *NoOpClient) SetWriteTimeout(d time.Duration) error { - return nil -} - -// Verify that NoOpClient implements the ClientInterface. -// https://golang.org/doc/faq#guarantee_satisfies_interface -var _ ClientInterface = &NoOpClient{} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/options.go b/vendor/github.com/DataDog/datadog-go/statsd/options.go deleted file mode 100644 index 886a8238..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/options.go +++ /dev/null @@ -1,317 +0,0 @@ -package statsd - -import ( - "math" - "strings" - "time" -) - -var ( - // DefaultNamespace is the default value for the Namespace option - DefaultNamespace = "" - // DefaultTags is the default value for the Tags option - DefaultTags = []string{} - // DefaultMaxBytesPerPayload is the default value for the MaxBytesPerPayload option - DefaultMaxBytesPerPayload = 0 - // DefaultMaxMessagesPerPayload is the default value for the MaxMessagesPerPayload option - DefaultMaxMessagesPerPayload = math.MaxInt32 - // DefaultBufferPoolSize is the default value for the DefaultBufferPoolSize option - DefaultBufferPoolSize = 0 - // DefaultBufferFlushInterval is the default value for the BufferFlushInterval option - DefaultBufferFlushInterval = 100 * time.Millisecond - // DefaultBufferShardCount is the default value for the BufferShardCount option - DefaultBufferShardCount = 32 - // DefaultSenderQueueSize is the default value for the DefaultSenderQueueSize option - DefaultSenderQueueSize = 0 - // DefaultWriteTimeoutUDS is the default value for the WriteTimeoutUDS option - DefaultWriteTimeoutUDS = 100 * time.Millisecond - // DefaultTelemetry is the default value for the Telemetry option - DefaultTelemetry = true - // DefaultReceivingMode is the default behavior when sending metrics - DefaultReceivingMode = MutexMode - // DefaultChannelModeBufferSize is the default size of the channel holding incoming metrics - DefaultChannelModeBufferSize = 4096 - // DefaultAggregationFlushInterval is the default interval for the aggregator to flush metrics. - DefaultAggregationFlushInterval = 3 * time.Second - // DefaultAggregation - DefaultAggregation = false - // DefaultExtendedAggregation - DefaultExtendedAggregation = false - // DefaultDevMode - DefaultDevMode = false -) - -// Options contains the configuration options for a client. -type Options struct { - // Namespace to prepend to all metrics, events and service checks name. - Namespace string - // Tags are global tags to be applied to every metrics, events and service checks. - Tags []string - // MaxBytesPerPayload is the maximum number of bytes a single payload will contain. - // The magic value 0 will set the option to the optimal size for the transport - // protocol used when creating the client: 1432 for UDP and 8192 for UDS. - MaxBytesPerPayload int - // MaxMessagesPerPayload is the maximum number of metrics, events and/or service checks a single payload will contain. - // This option can be set to `1` to create an unbuffered client. - MaxMessagesPerPayload int - // BufferPoolSize is the size of the pool of buffers in number of buffers. - // The magic value 0 will set the option to the optimal size for the transport - // protocol used when creating the client: 2048 for UDP and 512 for UDS. - BufferPoolSize int - // BufferFlushInterval is the interval after which the current buffer will get flushed. 
- BufferFlushInterval time.Duration - // BufferShardCount is the number of buffer "shards" that will be used. - // Those shards allows the use of multiple buffers at the same time to reduce - // lock contention. - BufferShardCount int - // SenderQueueSize is the size of the sender queue in number of buffers. - // The magic value 0 will set the option to the optimal size for the transport - // protocol used when creating the client: 2048 for UDP and 512 for UDS. - SenderQueueSize int - // WriteTimeoutUDS is the timeout after which a UDS packet is dropped. - WriteTimeoutUDS time.Duration - // Telemetry is a set of metrics automatically injected by the client in the - // dogstatsd stream to be able to monitor the client itself. - Telemetry bool - // ReceiveMode determins the behavior of the client when receiving to many - // metrics. The client will either drop the metrics if its buffers are - // full (ChannelMode mode) or block the caller until the metric can be - // handled (MutexMode mode). By default the client will MutexMode. This - // option should be set to ChannelMode only when use under very high - // load. - // - // MutexMode uses a mutex internally which is much faster than - // channel but causes some lock contention when used with a high number - // of threads. Mutex are sharded based on the metrics name which - // limit mutex contention when goroutines send different metrics. - // - // ChannelMode: uses channel (of ChannelModeBufferSize size) to send - // metrics and drop metrics if the channel is full. Sending metrics in - // this mode is slower that MutexMode (because of the channel), but - // will not block the application. This mode is made for application - // using many goroutines, sending the same metrics at a very high - // volume. The goal is to not slow down the application at the cost of - // dropping metrics and having a lower max throughput. - ReceiveMode ReceivingMode - // ChannelModeBufferSize is the size of the channel holding incoming metrics - ChannelModeBufferSize int - // AggregationFlushInterval is the interval for the aggregator to flush metrics - AggregationFlushInterval time.Duration - // [beta] Aggregation enables/disables client side aggregation for - // Gauges, Counts and Sets (compatible with every Agent's version). - Aggregation bool - // [beta] Extended aggregation enables/disables client side aggregation - // for all types. This feature is only compatible with Agent's versions - // >=7.25.0 or Agent's version >=6.25.0 && < 7.0.0. - ExtendedAggregation bool - // TelemetryAddr specify a different endpoint for telemetry metrics. - TelemetryAddr string - // DevMode enables the "dev" mode where the client sends much more - // telemetry metrics to help troubleshooting the client behavior. 
- DevMode bool -} - -func resolveOptions(options []Option) (*Options, error) { - o := &Options{ - Namespace: DefaultNamespace, - Tags: DefaultTags, - MaxBytesPerPayload: DefaultMaxBytesPerPayload, - MaxMessagesPerPayload: DefaultMaxMessagesPerPayload, - BufferPoolSize: DefaultBufferPoolSize, - BufferFlushInterval: DefaultBufferFlushInterval, - BufferShardCount: DefaultBufferShardCount, - SenderQueueSize: DefaultSenderQueueSize, - WriteTimeoutUDS: DefaultWriteTimeoutUDS, - Telemetry: DefaultTelemetry, - ReceiveMode: DefaultReceivingMode, - ChannelModeBufferSize: DefaultChannelModeBufferSize, - AggregationFlushInterval: DefaultAggregationFlushInterval, - Aggregation: DefaultAggregation, - ExtendedAggregation: DefaultExtendedAggregation, - DevMode: DefaultDevMode, - } - - for _, option := range options { - err := option(o) - if err != nil { - return nil, err - } - } - - return o, nil -} - -// Option is a client option. Can return an error if validation fails. -type Option func(*Options) error - -// WithNamespace sets the Namespace option. -func WithNamespace(namespace string) Option { - return func(o *Options) error { - if strings.HasSuffix(namespace, ".") { - o.Namespace = namespace - } else { - o.Namespace = namespace + "." - } - return nil - } -} - -// WithTags sets the Tags option. -func WithTags(tags []string) Option { - return func(o *Options) error { - o.Tags = tags - return nil - } -} - -// WithMaxMessagesPerPayload sets the MaxMessagesPerPayload option. -func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option { - return func(o *Options) error { - o.MaxMessagesPerPayload = maxMessagesPerPayload - return nil - } -} - -// WithMaxBytesPerPayload sets the MaxBytesPerPayload option. -func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option { - return func(o *Options) error { - o.MaxBytesPerPayload = MaxBytesPerPayload - return nil - } -} - -// WithBufferPoolSize sets the BufferPoolSize option. -func WithBufferPoolSize(bufferPoolSize int) Option { - return func(o *Options) error { - o.BufferPoolSize = bufferPoolSize - return nil - } -} - -// WithBufferFlushInterval sets the BufferFlushInterval option. -func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option { - return func(o *Options) error { - o.BufferFlushInterval = bufferFlushInterval - return nil - } -} - -// WithBufferShardCount sets the BufferShardCount option. -func WithBufferShardCount(bufferShardCount int) Option { - return func(o *Options) error { - o.BufferShardCount = bufferShardCount - return nil - } -} - -// WithSenderQueueSize sets the SenderQueueSize option. -func WithSenderQueueSize(senderQueueSize int) Option { - return func(o *Options) error { - o.SenderQueueSize = senderQueueSize - return nil - } -} - -// WithWriteTimeoutUDS sets the WriteTimeoutUDS option. 
-func WithWriteTimeoutUDS(writeTimeoutUDS time.Duration) Option { - return func(o *Options) error { - o.WriteTimeoutUDS = writeTimeoutUDS - return nil - } -} - -// WithoutTelemetry disables the telemetry -func WithoutTelemetry() Option { - return func(o *Options) error { - o.Telemetry = false - return nil - } -} - -// WithChannelMode will use channel to receive metrics -func WithChannelMode() Option { - return func(o *Options) error { - o.ReceiveMode = ChannelMode - return nil - } -} - -// WithMutexMode will use mutex to receive metrics -func WithMutexMode() Option { - return func(o *Options) error { - o.ReceiveMode = MutexMode - return nil - } -} - -// WithChannelModeBufferSize the channel buffer size when using "drop mode" -func WithChannelModeBufferSize(bufferSize int) Option { - return func(o *Options) error { - o.ChannelModeBufferSize = bufferSize - return nil - } -} - -// WithAggregationInterval set the aggregation interval -func WithAggregationInterval(interval time.Duration) Option { - return func(o *Options) error { - o.AggregationFlushInterval = interval - return nil - } -} - -// WithClientSideAggregation enables client side aggregation for Gauges, Counts -// and Sets. Client side aggregation is a beta feature. -func WithClientSideAggregation() Option { - return func(o *Options) error { - o.Aggregation = true - return nil - } -} - -// WithoutClientSideAggregation disables client side aggregation. -func WithoutClientSideAggregation() Option { - return func(o *Options) error { - o.Aggregation = false - o.ExtendedAggregation = false - return nil - } -} - -// WithExtendedClientSideAggregation enables client side aggregation for all -// types. This feature is only compatible with Agent's version >=6.25.0 && -// <7.0.0 or Agent's versions >=7.25.0. Client side aggregation is a beta -// feature. -func WithExtendedClientSideAggregation() Option { - return func(o *Options) error { - o.Aggregation = true - o.ExtendedAggregation = true - return nil - } -} - -// WithTelemetryAddr specify a different address for telemetry metrics. -func WithTelemetryAddr(addr string) Option { - return func(o *Options) error { - o.TelemetryAddr = addr - return nil - } -} - -// WithDevMode enables client "dev" mode, sending more Telemetry metrics to -// help troubleshoot client behavior. -func WithDevMode() Option { - return func(o *Options) error { - o.DevMode = true - return nil - } -} - -// WithoutDevMode disables client "dev" mode, sending more Telemetry metrics to -// help troubleshoot client behavior. 
-func WithoutDevMode() Option { - return func(o *Options) error { - o.DevMode = false - return nil - } -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/pipe.go b/vendor/github.com/DataDog/datadog-go/statsd/pipe.go deleted file mode 100644 index 0d098a18..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/pipe.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package statsd - -import "errors" - -func newWindowsPipeWriter(pipepath string) (statsdWriter, error) { - return nil, errors.New("Windows Named Pipes are only supported on Windows") -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/pipe_windows.go b/vendor/github.com/DataDog/datadog-go/statsd/pipe_windows.go deleted file mode 100644 index f533b024..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/pipe_windows.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package statsd - -import ( - "net" - "sync" - "time" - - "github.com/Microsoft/go-winio" -) - -const defaultPipeTimeout = 1 * time.Millisecond - -type pipeWriter struct { - mu sync.RWMutex - conn net.Conn - timeout time.Duration - pipepath string -} - -func (p *pipeWriter) SetWriteTimeout(d time.Duration) error { - p.mu.Lock() - p.timeout = d - p.mu.Unlock() - return nil -} - -func (p *pipeWriter) Write(data []byte) (n int, err error) { - conn, err := p.ensureConnection() - if err != nil { - return 0, err - } - - p.mu.RLock() - conn.SetWriteDeadline(time.Now().Add(p.timeout)) - p.mu.RUnlock() - - n, err = conn.Write(data) - if err != nil { - if e, ok := err.(net.Error); !ok || !e.Temporary() { - // disconnected; retry again on next attempt - p.mu.Lock() - p.conn = nil - p.mu.Unlock() - } - } - return n, err -} - -func (p *pipeWriter) ensureConnection() (net.Conn, error) { - p.mu.RLock() - conn := p.conn - p.mu.RUnlock() - if conn != nil { - return conn, nil - } - - // looks like we might need to connect - try again with write locking. - p.mu.Lock() - defer p.mu.Unlock() - if p.conn != nil { - return p.conn, nil - } - newconn, err := winio.DialPipe(p.pipepath, nil) - if err != nil { - return nil, err - } - p.conn = newconn - return newconn, nil -} - -func (p *pipeWriter) Close() error { - return p.conn.Close() -} - -func newWindowsPipeWriter(pipepath string) (*pipeWriter, error) { - // Defer connection establishment to first write - return &pipeWriter{ - conn: nil, - timeout: defaultPipeTimeout, - pipepath: pipepath, - }, nil -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/sender.go b/vendor/github.com/DataDog/datadog-go/statsd/sender.go deleted file mode 100644 index 4c8eb269..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/sender.go +++ /dev/null @@ -1,130 +0,0 @@ -package statsd - -import ( - "sync/atomic" - "time" -) - -// A statsdWriter offers a standard interface regardless of the underlying -// protocol. For now UDS and UPD writers are available. -// Attention: the underlying buffer of `data` is reused after a `statsdWriter.Write` call. -// `statsdWriter.Write` must be synchronous. 
-type statsdWriter interface { - Write(data []byte) (n int, err error) - SetWriteTimeout(time.Duration) error - Close() error -} - -// SenderMetrics contains metrics about the health of the sender -type SenderMetrics struct { - TotalSentBytes uint64 - TotalSentPayloads uint64 - TotalDroppedPayloads uint64 - TotalDroppedBytes uint64 - TotalDroppedPayloadsQueueFull uint64 - TotalDroppedBytesQueueFull uint64 - TotalDroppedPayloadsWriter uint64 - TotalDroppedBytesWriter uint64 -} - -type sender struct { - transport statsdWriter - pool *bufferPool - queue chan *statsdBuffer - metrics *SenderMetrics - stop chan struct{} - flushSignal chan struct{} -} - -func newSender(transport statsdWriter, queueSize int, pool *bufferPool) *sender { - sender := &sender{ - transport: transport, - pool: pool, - queue: make(chan *statsdBuffer, queueSize), - metrics: &SenderMetrics{}, - stop: make(chan struct{}), - flushSignal: make(chan struct{}), - } - - go sender.sendLoop() - return sender -} - -func (s *sender) send(buffer *statsdBuffer) { - select { - case s.queue <- buffer: - default: - atomic.AddUint64(&s.metrics.TotalDroppedPayloads, 1) - atomic.AddUint64(&s.metrics.TotalDroppedBytes, uint64(len(buffer.bytes()))) - atomic.AddUint64(&s.metrics.TotalDroppedPayloadsQueueFull, 1) - atomic.AddUint64(&s.metrics.TotalDroppedBytesQueueFull, uint64(len(buffer.bytes()))) - s.pool.returnBuffer(buffer) - } -} - -func (s *sender) write(buffer *statsdBuffer) { - _, err := s.transport.Write(buffer.bytes()) - if err != nil { - atomic.AddUint64(&s.metrics.TotalDroppedPayloads, 1) - atomic.AddUint64(&s.metrics.TotalDroppedBytes, uint64(len(buffer.bytes()))) - atomic.AddUint64(&s.metrics.TotalDroppedPayloadsWriter, 1) - atomic.AddUint64(&s.metrics.TotalDroppedBytesWriter, uint64(len(buffer.bytes()))) - } else { - atomic.AddUint64(&s.metrics.TotalSentPayloads, 1) - atomic.AddUint64(&s.metrics.TotalSentBytes, uint64(len(buffer.bytes()))) - } - s.pool.returnBuffer(buffer) -} - -func (s *sender) flushTelemetryMetrics() SenderMetrics { - return SenderMetrics{ - TotalSentBytes: atomic.SwapUint64(&s.metrics.TotalSentBytes, 0), - TotalSentPayloads: atomic.SwapUint64(&s.metrics.TotalSentPayloads, 0), - TotalDroppedPayloads: atomic.SwapUint64(&s.metrics.TotalDroppedPayloads, 0), - TotalDroppedBytes: atomic.SwapUint64(&s.metrics.TotalDroppedBytes, 0), - TotalDroppedPayloadsQueueFull: atomic.SwapUint64(&s.metrics.TotalDroppedPayloadsQueueFull, 0), - TotalDroppedBytesQueueFull: atomic.SwapUint64(&s.metrics.TotalDroppedBytesQueueFull, 0), - TotalDroppedPayloadsWriter: atomic.SwapUint64(&s.metrics.TotalDroppedPayloadsWriter, 0), - TotalDroppedBytesWriter: atomic.SwapUint64(&s.metrics.TotalDroppedBytesWriter, 0), - } -} - -func (s *sender) sendLoop() { - defer close(s.stop) - for { - select { - case buffer := <-s.queue: - s.write(buffer) - case <-s.stop: - return - case <-s.flushSignal: - // At that point we know that the workers are paused (the statsd client - // will pause them before calling sender.flush()). 
- // So we can fully flush the input queue - s.flushInputQueue() - s.flushSignal <- struct{}{} - } - } -} - -func (s *sender) flushInputQueue() { - for { - select { - case buffer := <-s.queue: - s.write(buffer) - default: - return - } - } -} -func (s *sender) flush() { - s.flushSignal <- struct{}{} - <-s.flushSignal -} - -func (s *sender) close() error { - s.stop <- struct{}{} - <-s.stop - s.flushInputQueue() - return s.transport.Close() -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/service_check.go b/vendor/github.com/DataDog/datadog-go/statsd/service_check.go deleted file mode 100644 index fce86755..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/service_check.go +++ /dev/null @@ -1,70 +0,0 @@ -package statsd - -import ( - "fmt" - "time" -) - -// ServiceCheckStatus support -type ServiceCheckStatus byte - -const ( - // Ok is the "ok" ServiceCheck status - Ok ServiceCheckStatus = 0 - // Warn is the "warning" ServiceCheck status - Warn ServiceCheckStatus = 1 - // Critical is the "critical" ServiceCheck status - Critical ServiceCheckStatus = 2 - // Unknown is the "unknown" ServiceCheck status - Unknown ServiceCheckStatus = 3 -) - -// A ServiceCheck is an object that contains status of DataDog service check. -type ServiceCheck struct { - // Name of the service check. Required. - Name string - // Status of service check. Required. - Status ServiceCheckStatus - // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd - // server will set this to the current time. - Timestamp time.Time - // Hostname for the serviceCheck. - Hostname string - // A message describing the current state of the serviceCheck. - Message string - // Tags for the serviceCheck. - Tags []string -} - -// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking -// against these values is done at send-time, or upon running sc.Check. -func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { - return &ServiceCheck{ - Name: name, - Status: status, - } -} - -// Check verifies that a service check is valid. -func (sc ServiceCheck) Check() error { - if len(sc.Name) == 0 { - return fmt.Errorf("statsd.ServiceCheck name is required") - } - if byte(sc.Status) < 0 || byte(sc.Status) > 3 { - return fmt.Errorf("statsd.ServiceCheck status has invalid value") - } - return nil -} - -// Encode returns the dogstatsd wire protocol representation for a service check. -// Tags may be passed which will be added to the encoded output but not to -// the Service Check's list of tags, eg. for default tags. -func (sc ServiceCheck) Encode(tags ...string) (string, error) { - err := sc.Check() - if err != nil { - return "", err - } - var buffer []byte - buffer = appendServiceCheck(buffer, sc, tags) - return string(buffer), nil -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go deleted file mode 100644 index 37dd31e3..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2013 Ooyala, Inc. - -/* -Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd, -adding tags and histograms and pushing upstream to Datadog. - -Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD. - -statsd is based on go-statsd-client. 
-*/ -package statsd - -import ( - "errors" - "fmt" - "os" - "strings" - "sync" - "sync/atomic" - "time" -) - -/* -OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes -is optimal for regular networks with an MTU of 1500 so datagrams don't get -fragmented. It's generally recommended not to fragment UDP datagrams as losing -a single fragment will cause the entire datagram to be lost. -*/ -const OptimalUDPPayloadSize = 1432 - -/* -MaxUDPPayloadSize defines the maximum payload size for a UDP datagram. -Its value comes from the calculation: 65535 bytes Max UDP datagram size - -8byte UDP header - 60byte max IP headers -any number greater than that will see frames being cut out. -*/ -const MaxUDPPayloadSize = 65467 - -// DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients. -const DefaultUDPBufferPoolSize = 2048 - -// DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients. -const DefaultUDSBufferPoolSize = 512 - -/* -DefaultMaxAgentPayloadSize is the default maximum payload size the agent -can receive. This can be adjusted by changing dogstatsd_buffer_size in the -agent configuration file datadog.yaml. This is also used as the optimal payload size -for UDS datagrams. -*/ -const DefaultMaxAgentPayloadSize = 8192 - -/* -UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket -traffic instead of UDP. -*/ -const UnixAddressPrefix = "unix://" - -/* -WindowsPipeAddressPrefix holds the prefix to use to enable Windows Named Pipes -traffic instead of UDP. -*/ -const WindowsPipeAddressPrefix = `\\.\pipe\` - -const ( - agentHostEnvVarName = "DD_AGENT_HOST" - agentPortEnvVarName = "DD_DOGSTATSD_PORT" - defaultUDPPort = "8125" -) - -/* -ddEnvTagsMapping is a mapping of each "DD_" prefixed environment variable -to a specific tag name. -*/ -var ddEnvTagsMapping = map[string]string{ - // Client-side entity ID injection for container tagging. - "DD_ENTITY_ID": "dd.internal.entity_id", - // The name of the env in which the service runs. - "DD_ENV": "env", - // The name of the running service. - "DD_SERVICE": "service", - // The current version of the running service. - "DD_VERSION": "version", -} - -type metricType int - -const ( - gauge metricType = iota - count - histogram - histogramAggregated - distribution - distributionAggregated - set - timing - timingAggregated - event - serviceCheck -) - -type ReceivingMode int - -const ( - MutexMode ReceivingMode = iota - ChannelMode -) - -const ( - WriterNameUDP string = "udp" - WriterNameUDS string = "uds" - WriterWindowsPipe string = "pipe" -) - -type metric struct { - metricType metricType - namespace string - globalTags []string - name string - fvalue float64 - fvalues []float64 - ivalue int64 - svalue string - evalue *Event - scvalue *ServiceCheck - tags []string - stags string - rate float64 -} - -type noClientErr string - -// ErrNoClient is returned if statsd reporting methods are invoked on -// a nil client. -const ErrNoClient = noClientErr("statsd client is nil") - -func (e noClientErr) Error() string { - return string(e) -} - -// ClientInterface is an interface that exposes the common client functions for the -// purpose of being able to provide a no-op client or even mocking. This can aid -// downstream users' with their testing. -type ClientInterface interface { - // Gauge measures the value of a metric at a particular time. - Gauge(name string, value float64, tags []string, rate float64) error - - // Count tracks how many times something happened per second. 
- Count(name string, value int64, tags []string, rate float64) error - - // Histogram tracks the statistical distribution of a set of values on each host. - Histogram(name string, value float64, tags []string, rate float64) error - - // Distribution tracks the statistical distribution of a set of values across your infrastructure. - Distribution(name string, value float64, tags []string, rate float64) error - - // Decr is just Count of -1 - Decr(name string, tags []string, rate float64) error - - // Incr is just Count of 1 - Incr(name string, tags []string, rate float64) error - - // Set counts the number of unique elements in a group. - Set(name string, value string, tags []string, rate float64) error - - // Timing sends timing information, it is an alias for TimeInMilliseconds - Timing(name string, value time.Duration, tags []string, rate float64) error - - // TimeInMilliseconds sends timing information in milliseconds. - // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) - TimeInMilliseconds(name string, value float64, tags []string, rate float64) error - - // Event sends the provided Event. - Event(e *Event) error - - // SimpleEvent sends an event with the provided title and text. - SimpleEvent(title, text string) error - - // ServiceCheck sends the provided ServiceCheck. - ServiceCheck(sc *ServiceCheck) error - - // SimpleServiceCheck sends an serviceCheck with the provided name and status. - SimpleServiceCheck(name string, status ServiceCheckStatus) error - - // Close the client connection. - Close() error - - // Flush forces a flush of all the queued dogstatsd payloads. - Flush() error - - // SetWriteTimeout allows the user to set a custom write timeout. - SetWriteTimeout(d time.Duration) error -} - -// A Client is a handle for sending messages to dogstatsd. It is safe to -// use one Client from multiple goroutines simultaneously. -type Client struct { - // Sender handles the underlying networking protocol - sender *sender - // Namespace to prepend to all statsd calls - Namespace string - // Tags are global tags to be added to every statsd call - Tags []string - // skipErrors turns off error passing and allows UDS to emulate UDP behaviour - SkipErrors bool - flushTime time.Duration - metrics *ClientMetrics - telemetry *telemetryClient - stop chan struct{} - wg sync.WaitGroup - workers []*worker - closerLock sync.Mutex - receiveMode ReceivingMode - agg *aggregator - aggHistDist *aggregator - options []Option - addrOption string -} - -// ClientMetrics contains metrics about the client -type ClientMetrics struct { - TotalMetrics uint64 - TotalMetricsGauge uint64 - TotalMetricsCount uint64 - TotalMetricsHistogram uint64 - TotalMetricsDistribution uint64 - TotalMetricsSet uint64 - TotalMetricsTiming uint64 - TotalEvents uint64 - TotalServiceChecks uint64 - TotalDroppedOnReceive uint64 -} - -// Verify that Client implements the ClientInterface. 
-// https://golang.org/doc/faq#guarantee_satisfies_interface -var _ ClientInterface = &Client{} - -func resolveAddr(addr string) string { - envPort := "" - if addr == "" { - addr = os.Getenv(agentHostEnvVarName) - envPort = os.Getenv(agentPortEnvVarName) - } - - if addr == "" { - return "" - } - - if !strings.HasPrefix(addr, WindowsPipeAddressPrefix) && !strings.HasPrefix(addr, UnixAddressPrefix) { - if !strings.Contains(addr, ":") { - if envPort != "" { - addr = fmt.Sprintf("%s:%s", addr, envPort) - } else { - addr = fmt.Sprintf("%s:%s", addr, defaultUDPPort) - } - } - } - return addr -} - -func createWriter(addr string) (statsdWriter, string, error) { - addr = resolveAddr(addr) - if addr == "" { - return nil, "", errors.New("No address passed and autodetection from environment failed") - } - - switch { - case strings.HasPrefix(addr, WindowsPipeAddressPrefix): - w, err := newWindowsPipeWriter(addr) - return w, WriterWindowsPipe, err - case strings.HasPrefix(addr, UnixAddressPrefix): - w, err := newUDSWriter(addr[len(UnixAddressPrefix):]) - return w, WriterNameUDS, err - default: - w, err := newUDPWriter(addr) - return w, WriterNameUDP, err - } -} - -// New returns a pointer to a new Client given an addr in the format "hostname:port" for UDP, -// "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes. -func New(addr string, options ...Option) (*Client, error) { - o, err := resolveOptions(options) - if err != nil { - return nil, err - } - - w, writerType, err := createWriter(addr) - if err != nil { - return nil, err - } - - client, err := newWithWriter(w, o, writerType) - if err == nil { - client.options = append(client.options, options...) - client.addrOption = addr - } - return client, err -} - -// NewWithWriter creates a new Client with given writer. Writer is a -// io.WriteCloser + SetWriteTimeout(time.Duration) error -func NewWithWriter(w statsdWriter, options ...Option) (*Client, error) { - o, err := resolveOptions(options) - if err != nil { - return nil, err - } - return newWithWriter(w, o, "custom") -} - -// CloneWithExtraOptions create a new Client with extra options -func CloneWithExtraOptions(c *Client, options ...Option) (*Client, error) { - if c == nil { - return nil, ErrNoClient - } - - if c.addrOption == "" { - return nil, fmt.Errorf("can't clone client with no addrOption") - } - opt := append(c.options, options...) - return New(c.addrOption, opt...) -} - -func newWithWriter(w statsdWriter, o *Options, writerName string) (*Client, error) { - - w.SetWriteTimeout(o.WriteTimeoutUDS) - - c := Client{ - Namespace: o.Namespace, - Tags: o.Tags, - metrics: &ClientMetrics{}, - } - if o.Aggregation || o.ExtendedAggregation { - c.agg = newAggregator(&c) - c.agg.start(o.AggregationFlushInterval) - - if o.ExtendedAggregation { - c.aggHistDist = c.agg - } - } - - // Inject values of DD_* environment variables as global tags. 
- for envName, tagName := range ddEnvTagsMapping { - if value := os.Getenv(envName); value != "" { - c.Tags = append(c.Tags, fmt.Sprintf("%s:%s", tagName, value)) - } - } - - if o.MaxBytesPerPayload == 0 { - if writerName == WriterNameUDS { - o.MaxBytesPerPayload = DefaultMaxAgentPayloadSize - } else { - o.MaxBytesPerPayload = OptimalUDPPayloadSize - } - } - if o.BufferPoolSize == 0 { - if writerName == WriterNameUDS { - o.BufferPoolSize = DefaultUDSBufferPoolSize - } else { - o.BufferPoolSize = DefaultUDPBufferPoolSize - } - } - if o.SenderQueueSize == 0 { - if writerName == WriterNameUDS { - o.SenderQueueSize = DefaultUDSBufferPoolSize - } else { - o.SenderQueueSize = DefaultUDPBufferPoolSize - } - } - - bufferPool := newBufferPool(o.BufferPoolSize, o.MaxBytesPerPayload, o.MaxMessagesPerPayload) - c.sender = newSender(w, o.SenderQueueSize, bufferPool) - c.receiveMode = o.ReceiveMode - for i := 0; i < o.BufferShardCount; i++ { - w := newWorker(bufferPool, c.sender) - c.workers = append(c.workers, w) - if c.receiveMode == ChannelMode { - w.startReceivingMetric(o.ChannelModeBufferSize) - } - } - - c.flushTime = o.BufferFlushInterval - c.stop = make(chan struct{}, 1) - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.watch() - }() - - if o.Telemetry { - if o.TelemetryAddr == "" { - c.telemetry = newTelemetryClient(&c, writerName, o.DevMode) - } else { - var err error - c.telemetry, err = newTelemetryClientWithCustomAddr(&c, writerName, o.DevMode, o.TelemetryAddr, bufferPool) - if err != nil { - return nil, err - } - } - c.telemetry.run(&c.wg, c.stop) - } - - return &c, nil -} - -// NewBuffered returns a Client that buffers its output and sends it in chunks. -// Buflen is the length of the buffer in number of commands. -// -// When addr is empty, the client will default to a UDP client and use the DD_AGENT_HOST -// and (optionally) the DD_DOGSTATSD_PORT environment variables to build the target address. -func NewBuffered(addr string, buflen int) (*Client, error) { - return New(addr, WithMaxMessagesPerPayload(buflen)) -} - -// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP -// or Windows Pipes. -func (c *Client) SetWriteTimeout(d time.Duration) error { - if c == nil { - return ErrNoClient - } - return c.sender.transport.SetWriteTimeout(d) -} - -func (c *Client) watch() { - ticker := time.NewTicker(c.flushTime) - - for { - select { - case <-ticker.C: - for _, w := range c.workers { - w.flush() - } - case <-c.stop: - ticker.Stop() - return - } - } -} - -// Flush forces a flush of all the queued dogstatsd payloads This method is -// blocking and will not return until everything is sent through the network. -// In MutexMode, this will also block sampling new data to the client while the -// workers and sender are flushed. 
-func (c *Client) Flush() error { - if c == nil { - return ErrNoClient - } - if c.agg != nil { - c.agg.sendMetrics() - } - for _, w := range c.workers { - w.pause() - defer w.unpause() - w.flushUnsafe() - } - // Now that the worker are pause the sender can flush the queue between - // worker and senders - c.sender.flush() - return nil -} - -func (c *Client) FlushTelemetryMetrics() ClientMetrics { - cm := ClientMetrics{ - TotalMetricsGauge: atomic.SwapUint64(&c.metrics.TotalMetricsGauge, 0), - TotalMetricsCount: atomic.SwapUint64(&c.metrics.TotalMetricsCount, 0), - TotalMetricsSet: atomic.SwapUint64(&c.metrics.TotalMetricsSet, 0), - TotalMetricsHistogram: atomic.SwapUint64(&c.metrics.TotalMetricsHistogram, 0), - TotalMetricsDistribution: atomic.SwapUint64(&c.metrics.TotalMetricsDistribution, 0), - TotalMetricsTiming: atomic.SwapUint64(&c.metrics.TotalMetricsTiming, 0), - TotalEvents: atomic.SwapUint64(&c.metrics.TotalEvents, 0), - TotalServiceChecks: atomic.SwapUint64(&c.metrics.TotalServiceChecks, 0), - TotalDroppedOnReceive: atomic.SwapUint64(&c.metrics.TotalDroppedOnReceive, 0), - } - - cm.TotalMetrics = cm.TotalMetricsGauge + cm.TotalMetricsCount + - cm.TotalMetricsSet + cm.TotalMetricsHistogram + - cm.TotalMetricsDistribution + cm.TotalMetricsTiming - - return cm -} - -func (c *Client) send(m metric) error { - if c == nil { - return ErrNoClient - } - - m.globalTags = c.Tags - m.namespace = c.Namespace - - h := hashString32(m.name) - worker := c.workers[h%uint32(len(c.workers))] - - if c.receiveMode == ChannelMode { - select { - case worker.inputMetrics <- m: - default: - atomic.AddUint64(&c.metrics.TotalDroppedOnReceive, 1) - } - return nil - } - return worker.processMetric(m) -} - -// Gauge measures the value of a metric at a particular time. -func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsGauge, 1) - if c.agg != nil { - return c.agg.gauge(name, value, tags) - } - return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate}) -} - -// Count tracks how many times something happened per second. -func (c *Client) Count(name string, value int64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsCount, 1) - if c.agg != nil { - return c.agg.count(name, value, tags) - } - return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate}) -} - -// Histogram tracks the statistical distribution of a set of values on each host. -func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsHistogram, 1) - if c.aggHistDist != nil { - return c.agg.histogram(name, value, tags) - } - return c.send(metric{metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate}) -} - -// Distribution tracks the statistical distribution of a set of values across your infrastructure. 
-func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsDistribution, 1) - if c.aggHistDist != nil { - return c.agg.distribution(name, value, tags) - } - return c.send(metric{metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate}) -} - -// Decr is just Count of -1 -func (c *Client) Decr(name string, tags []string, rate float64) error { - return c.Count(name, -1, tags, rate) -} - -// Incr is just Count of 1 -func (c *Client) Incr(name string, tags []string, rate float64) error { - return c.Count(name, 1, tags, rate) -} - -// Set counts the number of unique elements in a group. -func (c *Client) Set(name string, value string, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsSet, 1) - if c.agg != nil { - return c.agg.set(name, value, tags) - } - return c.send(metric{metricType: set, name: name, svalue: value, tags: tags, rate: rate}) -} - -// Timing sends timing information, it is an alias for TimeInMilliseconds -func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { - return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) -} - -// TimeInMilliseconds sends timing information in milliseconds. -// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) -func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalMetricsTiming, 1) - if c.aggHistDist != nil { - return c.agg.timing(name, value, tags) - } - return c.send(metric{metricType: timing, name: name, fvalue: value, tags: tags, rate: rate}) -} - -// Event sends the provided Event. -func (c *Client) Event(e *Event) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalEvents, 1) - return c.send(metric{metricType: event, evalue: e, rate: 1}) -} - -// SimpleEvent sends an event with the provided title and text. -func (c *Client) SimpleEvent(title, text string) error { - e := NewEvent(title, text) - return c.Event(e) -} - -// ServiceCheck sends the provided ServiceCheck. -func (c *Client) ServiceCheck(sc *ServiceCheck) error { - if c == nil { - return ErrNoClient - } - atomic.AddUint64(&c.metrics.TotalServiceChecks, 1) - return c.send(metric{metricType: serviceCheck, scvalue: sc, rate: 1}) -} - -// SimpleServiceCheck sends an serviceCheck with the provided name and status. -func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error { - sc := NewServiceCheck(name, status) - return c.ServiceCheck(sc) -} - -// Close the client connection. 
-func (c *Client) Close() error { - if c == nil { - return ErrNoClient - } - - // Acquire closer lock to ensure only one thread can close the stop channel - c.closerLock.Lock() - defer c.closerLock.Unlock() - - // Notify all other threads that they should stop - select { - case <-c.stop: - return nil - default: - } - close(c.stop) - - if c.receiveMode == ChannelMode { - for _, w := range c.workers { - w.stopReceivingMetric() - } - } - - // Wait for the threads to stop - c.wg.Wait() - - // Finally flush any remaining metrics that may have come in at the last moment - if c.agg != nil { - c.agg.stop() - } - c.Flush() - - return c.sender.close() -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/telemetry.go b/vendor/github.com/DataDog/datadog-go/statsd/telemetry.go deleted file mode 100644 index 7a992e5c..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/telemetry.go +++ /dev/null @@ -1,151 +0,0 @@ -package statsd - -import ( - "fmt" - "sync" - "time" -) - -/* -TelemetryInterval is the interval at which telemetry will be sent by the client. -*/ -const TelemetryInterval = 10 * time.Second - -/* -clientTelemetryTag is a tag identifying this specific client. -*/ -var clientTelemetryTag = "client:go" - -/* -clientVersionTelemetryTag is a tag identifying this specific client version. -*/ -var clientVersionTelemetryTag = "client_version:4.5.1" - -type telemetryClient struct { - c *Client - tags []string - tagsByType map[metricType][]string - sender *sender - worker *worker - devMode bool -} - -func newTelemetryClient(c *Client, transport string, devMode bool) *telemetryClient { - t := &telemetryClient{ - c: c, - tags: append(c.Tags, clientTelemetryTag, clientVersionTelemetryTag, "client_transport:"+transport), - tagsByType: map[metricType][]string{}, - devMode: devMode, - } - - if devMode { - t.tagsByType[gauge] = append(append([]string{}, t.tags...), "metrics_type:gauge") - t.tagsByType[count] = append(append([]string{}, t.tags...), "metrics_type:count") - t.tagsByType[set] = append(append([]string{}, t.tags...), "metrics_type:set") - t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing") - t.tagsByType[histogram] = append(append([]string{}, t.tags...), "metrics_type:histogram") - t.tagsByType[distribution] = append(append([]string{}, t.tags...), "metrics_type:distribution") - t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing") - } - return t -} - -func newTelemetryClientWithCustomAddr(c *Client, transport string, devMode bool, telemetryAddr string, pool *bufferPool) (*telemetryClient, error) { - telemetryWriter, _, err := createWriter(telemetryAddr) - if err != nil { - return nil, fmt.Errorf("Could not resolve telemetry address: %v", err) - } - - t := newTelemetryClient(c, transport, devMode) - - // Creating a custom sender/worker with 1 worker in mutex mode for the - // telemetry that share the same bufferPool. - // FIXME due to performance pitfall, we're always using UDP defaults - // even for UDS. 
- t.sender = newSender(telemetryWriter, DefaultUDPBufferPoolSize, pool) - t.worker = newWorker(pool, t.sender) - return t, nil -} - -func (t *telemetryClient) run(wg *sync.WaitGroup, stop chan struct{}) { - wg.Add(1) - go func() { - defer wg.Done() - ticker := time.NewTicker(TelemetryInterval) - for { - select { - case <-ticker.C: - t.sendTelemetry() - case <-stop: - ticker.Stop() - if t.sender != nil { - t.sender.close() - } - return - } - } - }() -} - -func (t *telemetryClient) sendTelemetry() { - for _, m := range t.flush() { - if t.worker != nil { - t.worker.processMetric(m) - } else { - t.c.send(m) - } - } - - if t.worker != nil { - t.worker.flush() - } -} - -// flushTelemetry returns Telemetry metrics to be flushed. It's its own function to ease testing. -func (t *telemetryClient) flush() []metric { - m := []metric{} - - // same as Count but without global namespace - telemetryCount := func(name string, value int64, tags []string) { - m = append(m, metric{metricType: count, name: name, ivalue: value, tags: tags, rate: 1}) - } - - clientMetrics := t.c.FlushTelemetryMetrics() - telemetryCount("datadog.dogstatsd.client.metrics", int64(clientMetrics.TotalMetrics), t.tags) - if t.devMode { - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsGauge), t.tagsByType[gauge]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsCount), t.tagsByType[count]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsHistogram), t.tagsByType[histogram]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsDistribution), t.tagsByType[distribution]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsSet), t.tagsByType[set]) - telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(clientMetrics.TotalMetricsTiming), t.tagsByType[timing]) - } - - telemetryCount("datadog.dogstatsd.client.events", int64(clientMetrics.TotalEvents), t.tags) - telemetryCount("datadog.dogstatsd.client.service_checks", int64(clientMetrics.TotalServiceChecks), t.tags) - telemetryCount("datadog.dogstatsd.client.metric_dropped_on_receive", int64(clientMetrics.TotalDroppedOnReceive), t.tags) - - senderMetrics := t.c.sender.flushTelemetryMetrics() - telemetryCount("datadog.dogstatsd.client.packets_sent", int64(senderMetrics.TotalSentPayloads), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(senderMetrics.TotalSentBytes), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(senderMetrics.TotalDroppedPayloads), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(senderMetrics.TotalDroppedBytes), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(senderMetrics.TotalDroppedPayloadsQueueFull), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(senderMetrics.TotalDroppedBytesQueueFull), t.tags) - telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(senderMetrics.TotalDroppedPayloadsWriter), t.tags) - telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(senderMetrics.TotalDroppedBytesWriter), t.tags) - - if aggMetrics := t.c.agg.flushTelemetryMetrics(); aggMetrics != nil { - telemetryCount("datadog.dogstatsd.client.aggregated_context", int64(aggMetrics.nbContext), t.tags) - if t.devMode { - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", 
int64(aggMetrics.nbContextGauge), t.tagsByType[gauge]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(aggMetrics.nbContextSet), t.tagsByType[set]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(aggMetrics.nbContextCount), t.tagsByType[count]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(aggMetrics.nbContextHistogram), t.tagsByType[histogram]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(aggMetrics.nbContextDistribution), t.tagsByType[distribution]) - telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(aggMetrics.nbContextTiming), t.tagsByType[timing]) - } - } - - return m -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/udp.go b/vendor/github.com/DataDog/datadog-go/statsd/udp.go deleted file mode 100644 index 8af522c5..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/udp.go +++ /dev/null @@ -1,40 +0,0 @@ -package statsd - -import ( - "errors" - "net" - "time" -) - -// udpWriter is an internal class wrapping around management of UDP connection -type udpWriter struct { - conn net.Conn -} - -// New returns a pointer to a new udpWriter given an addr in the format "hostname:port". -func newUDPWriter(addr string) (*udpWriter, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, err - } - conn, err := net.DialUDP("udp", nil, udpAddr) - if err != nil { - return nil, err - } - writer := &udpWriter{conn: conn} - return writer, nil -} - -// SetWriteTimeout is not needed for UDP, returns error -func (w *udpWriter) SetWriteTimeout(d time.Duration) error { - return errors.New("SetWriteTimeout: not supported for UDP connections") -} - -// Write data to the UDP connection with no error handling -func (w *udpWriter) Write(data []byte) (int, error) { - return w.conn.Write(data) -} - -func (w *udpWriter) Close() error { - return w.conn.Close() -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds.go b/vendor/github.com/DataDog/datadog-go/statsd/uds.go deleted file mode 100644 index 6c52261b..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/uds.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build !windows - -package statsd - -import ( - "net" - "sync" - "time" -) - -/* -UDSTimeout holds the default timeout for UDS socket writes, as they can get -blocking when the receiving buffer is full. -*/ -const defaultUDSTimeout = 100 * time.Millisecond - -// udsWriter is an internal class wrapping around management of UDS connection -type udsWriter struct { - // Address to send metrics to, needed to allow reconnection on error - addr net.Addr - // Established connection object, or nil if not connected yet - conn net.Conn - // write timeout - writeTimeout time.Duration - sync.RWMutex // used to lock conn / writer can replace it -} - -// newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr. 
-func newUDSWriter(addr string) (*udsWriter, error) { - udsAddr, err := net.ResolveUnixAddr("unixgram", addr) - if err != nil { - return nil, err - } - // Defer connection to first Write - writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout} - return writer, nil -} - -// SetWriteTimeout allows the user to set a custom write timeout -func (w *udsWriter) SetWriteTimeout(d time.Duration) error { - w.writeTimeout = d - return nil -} - -// Write data to the UDS connection with write timeout and minimal error handling: -// create the connection if nil, and destroy it if the statsd server has disconnected -func (w *udsWriter) Write(data []byte) (int, error) { - conn, err := w.ensureConnection() - if err != nil { - return 0, err - } - - conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) - n, e := conn.Write(data) - - if err, isNetworkErr := e.(net.Error); err != nil && (!isNetworkErr || !err.Temporary()) { - // Statsd server disconnected, retry connecting at next packet - w.unsetConnection() - return 0, e - } - return n, e -} - -func (w *udsWriter) Close() error { - if w.conn != nil { - return w.conn.Close() - } - return nil -} - -func (w *udsWriter) ensureConnection() (net.Conn, error) { - // Check if we've already got a socket we can use - w.RLock() - currentConn := w.conn - w.RUnlock() - - if currentConn != nil { - return currentConn, nil - } - - // Looks like we might need to connect - try again with write locking. - w.Lock() - defer w.Unlock() - if w.conn != nil { - return w.conn, nil - } - - newConn, err := net.Dial(w.addr.Network(), w.addr.String()) - if err != nil { - return nil, err - } - w.conn = newConn - return newConn, nil -} - -func (w *udsWriter) unsetConnection() { - w.Lock() - defer w.Unlock() - w.conn = nil -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/uds_windows.go b/vendor/github.com/DataDog/datadog-go/statsd/uds_windows.go deleted file mode 100644 index 9c97dfd4..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/uds_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build windows - -package statsd - -import "fmt" - -// newUDSWriter is disable on windows as unix sockets are not available -func newUDSWriter(addr string) (statsdWriter, error) { - return nil, fmt.Errorf("unix socket is not available on windows") -} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/worker.go b/vendor/github.com/DataDog/datadog-go/statsd/worker.go deleted file mode 100644 index 2ab0c75f..00000000 --- a/vendor/github.com/DataDog/datadog-go/statsd/worker.go +++ /dev/null @@ -1,163 +0,0 @@ -package statsd - -import ( - "math/rand" - "sync" - "time" -) - -type worker struct { - pool *bufferPool - buffer *statsdBuffer - sender *sender - random *rand.Rand - randomLock sync.Mutex - sync.Mutex - - inputMetrics chan metric - stop chan struct{} -} - -func newWorker(pool *bufferPool, sender *sender) *worker { - // Each worker uses its own random source and random lock to prevent - // workers in separate goroutines from contending for the lock on the - // "math/rand" package-global random source (e.g. calls like - // "rand.Float64()" must acquire a shared lock to get the next - // pseudorandom number). - // Note that calling "time.Now().UnixNano()" repeatedly quickly may return - // very similar values. That's fine for seeding the worker-specific random - // source because we just need an evenly distributed stream of float values. - // Do not use this random source for cryptographic randomness. 
- random := rand.New(rand.NewSource(time.Now().UnixNano())) - return &worker{ - pool: pool, - sender: sender, - buffer: pool.borrowBuffer(), - random: random, - stop: make(chan struct{}), - } -} - -func (w *worker) startReceivingMetric(bufferSize int) { - w.inputMetrics = make(chan metric, bufferSize) - go w.pullMetric() -} - -func (w *worker) stopReceivingMetric() { - w.stop <- struct{}{} -} - -func (w *worker) pullMetric() { - for { - select { - case m := <-w.inputMetrics: - w.processMetric(m) - case <-w.stop: - return - } - } -} - -func (w *worker) processMetric(m metric) error { - if !w.shouldSample(m.rate) { - return nil - } - w.Lock() - var err error - if err = w.writeMetricUnsafe(m); err == errBufferFull { - w.flushUnsafe() - err = w.writeMetricUnsafe(m) - } - w.Unlock() - return err -} - -func (w *worker) shouldSample(rate float64) bool { - // sources created by rand.NewSource() (ie. w.random) are not thread safe. - // TODO: use defer once the lowest Go version we support is 1.14 (defer - // has an overhead before that). - w.randomLock.Lock() - if rate < 1 && w.random.Float64() > rate { - w.randomLock.Unlock() - return false - } - w.randomLock.Unlock() - return true -} - -func (w *worker) writeAggregatedMetricUnsafe(m metric, metricSymbol []byte) error { - globalPos := 0 - - // first check how much data we can write to the buffer: - // +3 + len(metricSymbol) because the message will include '||#' before the tags - // +1 for the potential line break at the start of the metric - tagsSize := len(m.stags) + 4 + len(metricSymbol) - for _, t := range m.globalTags { - tagsSize += len(t) + 1 - } - - for { - pos, err := w.buffer.writeAggregated(metricSymbol, m.namespace, m.globalTags, m.name, m.fvalues[globalPos:], m.stags, tagsSize) - if err == errPartialWrite { - // We successfully wrote part of the histogram metrics. - // We flush the current buffer and finish the histogram - // in a new one. - w.flushUnsafe() - globalPos += pos - } else { - return err - } - } -} - -func (w *worker) writeMetricUnsafe(m metric) error { - switch m.metricType { - case gauge: - return w.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case count: - return w.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate) - case histogram: - return w.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case distribution: - return w.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case set: - return w.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate) - case timing: - return w.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate) - case event: - return w.buffer.writeEvent(*m.evalue, m.globalTags) - case serviceCheck: - return w.buffer.writeServiceCheck(*m.scvalue, m.globalTags) - case histogramAggregated: - return w.writeAggregatedMetricUnsafe(m, histogramSymbol) - case distributionAggregated: - return w.writeAggregatedMetricUnsafe(m, distributionSymbol) - case timingAggregated: - return w.writeAggregatedMetricUnsafe(m, timingSymbol) - default: - return nil - } -} - -func (w *worker) flush() { - w.Lock() - w.flushUnsafe() - w.Unlock() -} - -func (w *worker) pause() { - w.Lock() -} - -func (w *worker) unpause() { - w.Unlock() -} - -// flush the current buffer. Lock must be held by caller. -// flushed buffer written to the network asynchronously. 
-func (w *worker) flushUnsafe() { - if len(w.buffer.bytes()) > 0 { - w.sender.send(w.buffer) - w.buffer = w.pool.borrowBuffer() - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index d202af6d..3a25bd82 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,6 @@ -# github.com/DataDog/datadog-go v4.5.1+incompatible -## explicit -github.com/DataDog/datadog-go/statsd +# github.com/DataDog/datadog-go/v5 v5.6.0 +## explicit; go 1.13 +github.com/DataDog/datadog-go/v5/statsd # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio @@ -65,7 +65,7 @@ github.com/rs/xid ## explicit; go 1.13 github.com/sirupsen/logrus github.com/sirupsen/logrus/hooks/test -# github.com/stretchr/testify v1.8.0 +# github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require
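
For reference, the v5 module that replaces the vendored v4 sources deleted above exposes the same basic client surface that those sources document (New with functional options, WithNamespace, WithTags, and Incr/Gauge/Timing taking a tag slice and a sample rate). The sketch below shows what a caller against v5 could look like under that assumption; the address, namespace, metric names, and tag values are placeholders for illustration, not values taken from this repository.

package main

import (
	"log"
	"time"

	"github.com/DataDog/datadog-go/v5/statsd"
)

func main() {
	// Address format is the same as in the deleted v4 sources: "host:port"
	// for UDP, "unix:///path/to/socket" for UDS.
	client, err := statsd.New("127.0.0.1:8125",
		statsd.WithNamespace("myservice."),       // prepended to every metric name
		statsd.WithTags([]string{"env:example"}), // global tags added to every metric
	)
	if err != nil {
		log.Fatalf("creating statsd client: %v", err)
	}
	defer client.Close()

	// Same method shapes as the v4 ClientInterface documented above:
	// metric name, value, per-call tags, sample rate.
	_ = client.Incr("requests.total", []string{"status:ok"}, 1)
	_ = client.Gauge("queue.depth", 42, nil, 1)
	_ = client.Timing("request.duration", 15*time.Millisecond, nil, 1)
}

As the deleted v4 Close implementation above shows, the client buffers metrics and flushes whatever remains when it is closed, so deferring Close on shutdown is what avoids dropping the final payloads.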