diff --git a/go.mod b/go.mod
index 8b693a1..d4c8bc5 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
module github.com/ClickHouse/kubenetmon
go 1.23
+toolchain go1.24.1
+
require (
github.com/ClickHouse/clickhouse-go/v2 v2.26.0
github.com/PraserX/ipconv v1.2.0
@@ -8,7 +10,7 @@ require (
github.com/prometheus/client_golang v1.19.1
github.com/rs/zerolog v1.32.0
github.com/seancfoley/ipaddress-go v1.6.0
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
github.com/ti-mo/conntrack v0.5.0
go.uber.org/mock v0.4.0
google.golang.org/grpc v1.65.0
@@ -20,7 +22,7 @@ require (
)
require (
- github.com/ClickHouse/ch-go v0.61.5 // indirect
+ github.com/ClickHouse/ch-go v0.65.0 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -29,7 +31,7 @@ require (
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.7.1 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
@@ -41,7 +43,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/josharian/native v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.7 // indirect
+ github.com/klauspost/compress v1.17.11 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
@@ -51,18 +53,19 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/paulmach/orb v0.11.1 // indirect
- github.com/pierrec/lz4/v4 v4.1.21 // indirect
+ github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/seancfoley/bintree v1.3.1 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/ti-mo/netfilter v0.5.0 // indirect
- go.opentelemetry.io/otel v1.26.0 // indirect
- go.opentelemetry.io/otel/trace v1.26.0 // indirect
+ go.opentelemetry.io/otel v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/net v0.36.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.11.0 // indirect
diff --git a/go.sum b/go.sum
index d6407c7..4cead5d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4=
-github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg=
+github.com/ClickHouse/ch-go v0.65.0 h1:vZAXfTQliuNNefqkPDewX3kgRxN6Q4vUENnnY+ynTRY=
+github.com/ClickHouse/ch-go v0.65.0/go.mod h1:tCM0XEH5oWngoi9Iu/8+tjPBo04I/FxNIffpdjtwx3k=
github.com/ClickHouse/clickhouse-go/v2 v2.26.0 h1:j4/y6NYaCcFkJwN/TU700ebW+nmsIy34RmUAAcZKy9w=
github.com/ClickHouse/clickhouse-go/v2 v2.26.0/go.mod h1:iDTViXk2Fgvf1jn2dbJd1ys+fBkdD1UMRnXlwmhijhQ=
github.com/PraserX/ipconv v1.2.0 h1:3bboP9EDfsuMF5C3qM25OmZA4+cCfk1Ahpx8zn5G2tM=
@@ -23,8 +23,8 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg=
github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -63,8 +63,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -99,8 +99,8 @@ github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKi
github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU=
github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
-github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
-github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
+github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -113,8 +113,8 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0=
github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
@@ -136,8 +136,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/ti-mo/conntrack v0.5.0 h1:OWiWm18gx6IA0c8FvLuXpcvHUsR0Cyw6FIFIZtYJ2W4=
github.com/ti-mo/conntrack v0.5.0/go.mod h1:xTW+s2bugPtNnx58p1yyz+UADwho2cZFom6SsK0UTw0=
github.com/ti-mo/netfilter v0.5.0 h1:MZmsUw5bFRecOb0AeyjOPxTHg4UxYzyEs0Ek/6Lxoy8=
@@ -152,10 +152,10 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
-go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
-go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
-go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
-go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -205,8 +205,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/ClickHouse/ch-go/compress/compress.go b/vendor/github.com/ClickHouse/ch-go/compress/compress.go
index a89c640..bf2cbf9 100644
--- a/vendor/github.com/ClickHouse/ch-go/compress/compress.go
+++ b/vendor/github.com/ClickHouse/ch-go/compress/compress.go
@@ -7,18 +7,39 @@ import (
"github.com/go-faster/city"
)
-//go:generate go run github.com/dmarkham/enumer -transform snake_upper -type Method -output method_enum.go
+//go:generate go run github.com/dmarkham/enumer -transform upper -type Method -output method_enum.go
// Method is compression codec.
type Method byte
// Possible compression methods.
const (
- None Method = 0x02
- LZ4 Method = 0x82
- ZSTD Method = 0x90
+ None Method = iota
+ LZ4
+ LZ4HC
+ ZSTD
+ NumMethods int = iota
)
+type methodEncoding byte
+
+const (
+ encodedNone methodEncoding = 0x02
+ encodedLZ4 methodEncoding = 0x82
+ encodedLZ4HC methodEncoding = encodedLZ4
+ encodedZSTD methodEncoding = 0x90
+)
+
+var methodTable = map[Method]methodEncoding{
+ None: encodedNone,
+ LZ4: encodedLZ4,
+ LZ4HC: encodedLZ4HC,
+ ZSTD: encodedZSTD,
+}
+
+// Level for supporting compression codecs.
+type Level uint32
+
// Constants for compression encoding.
//
// See https://go-faster.org/docs/clickhouse/compression for reference.
diff --git a/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go
index 8d44352..373eb28 100644
--- a/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go
+++ b/vendor/github.com/ClickHouse/ch-go/compress/method_enum.go
@@ -1,4 +1,4 @@
-// Code generated by "enumer -transform snake_upper -type Method -output method_enum.go"; DO NOT EDIT.
+// Code generated by "enumer -transform upper -type Method -output method_enum.go"; DO NOT EDIT.
package compress
@@ -7,58 +7,47 @@ import (
"strings"
)
-const (
- _MethodName_0 = "NONE"
- _MethodLowerName_0 = "none"
- _MethodName_1 = "LZ4"
- _MethodLowerName_1 = "lz4"
- _MethodName_2 = "ZSTD"
- _MethodLowerName_2 = "zstd"
-)
+const _MethodName = "NONELZ4LZ4HCZSTD"
-var (
- _MethodIndex_0 = [...]uint8{0, 4}
- _MethodIndex_1 = [...]uint8{0, 3}
- _MethodIndex_2 = [...]uint8{0, 4}
-)
+var _MethodIndex = [...]uint8{0, 4, 7, 12, 16}
+
+const _MethodLowerName = "nonelz4lz4hczstd"
func (i Method) String() string {
- switch {
- case i == 2:
- return _MethodName_0
- case i == 130:
- return _MethodName_1
- case i == 144:
- return _MethodName_2
- default:
+ if i >= Method(len(_MethodIndex)-1) {
return fmt.Sprintf("Method(%d)", i)
}
+ return _MethodName[_MethodIndex[i]:_MethodIndex[i+1]]
}
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
func _MethodNoOp() {
var x [1]struct{}
- _ = x[None-(2)]
- _ = x[LZ4-(130)]
- _ = x[ZSTD-(144)]
+ _ = x[None-(0)]
+ _ = x[LZ4-(1)]
+ _ = x[LZ4HC-(2)]
+ _ = x[ZSTD-(3)]
}
-var _MethodValues = []Method{None, LZ4, ZSTD}
+var _MethodValues = []Method{None, LZ4, LZ4HC, ZSTD}
var _MethodNameToValueMap = map[string]Method{
- _MethodName_0[0:4]: None,
- _MethodLowerName_0[0:4]: None,
- _MethodName_1[0:3]: LZ4,
- _MethodLowerName_1[0:3]: LZ4,
- _MethodName_2[0:4]: ZSTD,
- _MethodLowerName_2[0:4]: ZSTD,
+ _MethodName[0:4]: None,
+ _MethodLowerName[0:4]: None,
+ _MethodName[4:7]: LZ4,
+ _MethodLowerName[4:7]: LZ4,
+ _MethodName[7:12]: LZ4HC,
+ _MethodLowerName[7:12]: LZ4HC,
+ _MethodName[12:16]: ZSTD,
+ _MethodLowerName[12:16]: ZSTD,
}
var _MethodNames = []string{
- _MethodName_0[0:4],
- _MethodName_1[0:3],
- _MethodName_2[0:4],
+ _MethodName[0:4],
+ _MethodName[4:7],
+ _MethodName[7:12],
+ _MethodName[12:16],
}
// MethodString retrieves an enum value from the enum constants string name.
diff --git a/vendor/github.com/ClickHouse/ch-go/compress/reader.go b/vendor/github.com/ClickHouse/ch-go/compress/reader.go
index 6a26f9d..fc4a0e6 100644
--- a/vendor/github.com/ClickHouse/ch-go/compress/reader.go
+++ b/vendor/github.com/ClickHouse/ch-go/compress/reader.go
@@ -70,8 +70,8 @@ func (r *Reader) readBlock() error {
DataSize: dataSize,
}, "mismatch")
}
- switch m := Method(r.header[hMethod]); m {
- case LZ4:
+ switch m := methodEncoding(r.header[hMethod]); m {
+ case encodedLZ4: // == encodedLZ4HC, as decompression is similar for both
n, err := lz4.UncompressBlock(r.raw[headerSize:], r.data)
if err != nil {
return errors.Wrap(err, "uncompress")
@@ -81,7 +81,7 @@ func (r *Reader) readBlock() error {
n, dataSize,
)
}
- case ZSTD:
+ case encodedZSTD:
if r.zstd == nil {
// Lazily initializing to prevent spawning goroutines in NewReader.
// See https://github.com/golang/go/issues/47056#issuecomment-997436820
@@ -104,7 +104,7 @@ func (r *Reader) readBlock() error {
)
}
r.data = data
- case None:
+ case encodedNone:
copy(r.data, r.raw[headerSize:])
default:
return errors.Errorf("compression 0x%02x not implemented", m)
diff --git a/vendor/github.com/ClickHouse/ch-go/compress/writer.go b/vendor/github.com/ClickHouse/ch-go/compress/writer.go
index 6094b05..a8d2680 100644
--- a/vendor/github.com/ClickHouse/ch-go/compress/writer.go
+++ b/vendor/github.com/ClickHouse/ch-go/compress/writer.go
@@ -2,6 +2,7 @@ package compress
import (
"encoding/binary"
+ "math"
"github.com/go-faster/city"
"github.com/go-faster/errors"
@@ -9,30 +10,45 @@ import (
"github.com/pierrec/lz4/v4"
)
+const (
+ LevelZero Level = 0
+ LevelLZ4HCDefault Level = 9
+ LevelLZ4HCMax Level = 12
+)
+
// Writer encodes compressed blocks.
type Writer struct {
Data []byte
- lz4 *lz4.Compressor
- zstd *zstd.Encoder
+ method Method
+
+ lz4 *lz4.Compressor
+ lz4hc *lz4.CompressorHC
+ zstd *zstd.Encoder
}
// Compress buf into Data.
-func (w *Writer) Compress(m Method, buf []byte) error {
+func (w *Writer) Compress(buf []byte) error {
maxSize := lz4.CompressBlockBound(len(buf))
w.Data = append(w.Data[:0], make([]byte, maxSize+headerSize)...)
_ = w.Data[:headerSize]
- w.Data[hMethod] = byte(m)
+ w.Data[hMethod] = byte(methodTable[w.method])
var n int
- switch m {
+ switch w.method {
case LZ4:
compressedSize, err := w.lz4.CompressBlock(buf, w.Data[headerSize:])
if err != nil {
return errors.Wrap(err, "block")
}
n = compressedSize
+ case LZ4HC:
+ compressedSize, err := w.lz4hc.CompressBlock(buf, w.Data[headerSize:])
+ if err != nil {
+ return errors.Wrap(err, "block")
+ }
+ n = compressedSize
case ZSTD:
w.Data = w.zstd.EncodeAll(buf, w.Data[:headerSize])
n = len(w.Data) - headerSize
@@ -40,8 +56,12 @@ func (w *Writer) Compress(m Method, buf []byte) error {
n = copy(w.Data[headerSize:], buf)
}
- w.Data = w.Data[:n+headerSize]
+ // security: https://github.com/ClickHouse/ch-go/pull/1041
+ if uint64(n)+uint64(compressHeaderSize) > math.MaxUint32 {
+ return errors.New("compressed size overflows uint32")
+ }
+ w.Data = w.Data[:n+headerSize]
binary.LittleEndian.PutUint32(w.Data[hRawSize:], uint32(n+compressHeaderSize))
binary.LittleEndian.PutUint32(w.Data[hDataSize:], uint32(len(buf)))
h := city.CH128(w.Data[hMethod:])
@@ -51,17 +71,40 @@ func (w *Writer) Compress(m Method, buf []byte) error {
return nil
}
-func NewWriter() *Writer {
- w, err := zstd.NewWriter(nil,
- zstd.WithEncoderLevel(zstd.SpeedDefault),
- zstd.WithEncoderConcurrency(1),
- zstd.WithLowerEncoderMem(true),
- )
- if err != nil {
- panic(err)
+// NewWriter creates a Writer that compresses blocks using method m, at compression level l where the method supports levels.
+func NewWriter(l Level, m Method) *Writer {
+ var err error
+ var zstdWriter *zstd.Encoder
+ var lz4Writer *lz4.Compressor
+ var lz4hcWriter *lz4.CompressorHC
+
+ switch m {
+ case LZ4:
+ lz4Writer = &lz4.Compressor{}
+ case LZ4HC:
+ levelLZ4HC := l
+ if levelLZ4HC == 0 {
+ levelLZ4HC = LevelLZ4HCDefault
+ } else {
+ levelLZ4HC = Level(math.Min(float64(levelLZ4HC), float64(LevelLZ4HCMax)))
+ }
+ lz4hcWriter = &lz4.CompressorHC{Level: lz4.CompressionLevel(1 << (8 + levelLZ4HC))}
+ case ZSTD:
+ zstdWriter, err = zstd.NewWriter(nil,
+ zstd.WithEncoderLevel(zstd.SpeedDefault),
+ zstd.WithEncoderConcurrency(1),
+ zstd.WithLowerEncoderMem(true),
+ )
+ if err != nil {
+ panic(err)
+ }
+ default:
}
+
return &Writer{
- lz4: &lz4.Compressor{},
- zstd: w,
+ method: m,
+ lz4: lz4Writer,
+ lz4hc: lz4hcWriter,
+ zstd: zstdWriter,
}
}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/block.go b/vendor/github.com/ClickHouse/ch-go/proto/block.go
index 3454806..2e8fb88 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/block.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/block.go
@@ -183,6 +183,38 @@ func (b Block) EncodeRawBlock(buf *Buffer, version int, input []InputColumn) err
return nil
}
+func (b Block) WriteBlock(w *Writer, version int, input []InputColumn) error {
+ w.ChainBuffer(func(buf *Buffer) {
+ if FeatureBlockInfo.In(version) {
+ b.Info.Encode(buf)
+ }
+ buf.PutInt(b.Columns)
+ buf.PutInt(b.Rows)
+ })
+
+ for _, col := range input {
+ if r := col.Data.Rows(); r != b.Rows {
+ return errors.Errorf("%q has %d rows, expected %d", col.Name, r, b.Rows)
+ }
+ w.ChainBuffer(func(buf *Buffer) {
+ col.EncodeStart(buf, version)
+ })
+ if v, ok := col.Data.(Preparable); ok {
+ if err := v.Prepare(); err != nil {
+ return errors.Wrapf(err, "prepare %q", col.Name)
+ }
+ }
+ if col.Data.Rows() == 0 {
+ continue
+ }
+ if v, ok := col.Data.(StateEncoder); ok {
+ w.ChainBuffer(v.EncodeState)
+ }
+ col.Data.WriteColumn(w)
+ }
+ return nil
+}
+
// This constrains can prevent accidental OOM and allow early detection
// of erroneous column or row count.
//
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go
index ad57436..9b7639b 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go
@@ -130,6 +130,12 @@ func (c ColArr[T]) EncodeColumn(b *Buffer) {
c.Data.EncodeColumn(b)
}
+// WriteColumn implements ColInput.
+func (c ColArr[T]) WriteColumn(w *Writer) {
+ c.Offsets.WriteColumn(w)
+ c.Data.WriteColumn(w)
+}
+
// Append appends new row to column.
func (c *ColArr[T]) Append(v []T) {
c.Data.AppendArr(v)
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go
new file mode 100644
index 0000000..c421b41
--- /dev/null
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr_go123.go
@@ -0,0 +1,22 @@
+//go:build go1.23
+
+package proto
+
+import "iter"
+
+// RowRange returns an [iter.Seq] iterator over the i-th row.
+func (c ColArr[T]) RowRange(i int) iter.Seq[T] {
+ var start int
+ end := int(c.Offsets[i])
+ if i > 0 {
+ start = int(c.Offsets[i-1])
+ }
+
+ return func(yield func(T) bool) {
+ for idx := start; idx < end; idx++ {
+ if !yield(c.Data.Row(idx)) {
+ return
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go
index 2ed9b8e..d5f614d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go
@@ -1,6 +1,8 @@
package proto
import (
+ "reflect"
+ "strconv"
"strings"
"github.com/go-faster/errors"
@@ -36,20 +38,8 @@ func (c *ColAuto) Infer(t ColumnType) error {
switch t {
case ColumnTypeNothing:
c.Data = new(ColNothing)
- case ColumnTypeNullable.Sub(ColumnTypeNothing):
- c.Data = new(ColNothing).Nullable()
- case ColumnTypeArray.Sub(ColumnTypeNothing):
- c.Data = new(ColNothing).Array()
case ColumnTypeString:
c.Data = new(ColStr)
- case ColumnTypeArray.Sub(ColumnTypeString):
- c.Data = new(ColStr).Array()
- case ColumnTypeNullable.Sub(ColumnTypeString):
- c.Data = new(ColStr).Nullable()
- case ColumnTypeLowCardinality.Sub(ColumnTypeString):
- c.Data = new(ColStr).LowCardinality()
- case ColumnTypeArray.Sub(ColumnTypeLowCardinality.Sub(ColumnTypeString)):
- c.Data = new(ColStr).LowCardinality().Array()
case ColumnTypeBool:
c.Data = new(ColBool)
case ColumnTypeDateTime:
@@ -60,12 +50,50 @@ func (c *ColAuto) Infer(t ColumnType) error {
c.Data = NewMap[string, string](new(ColStr), new(ColStr))
case ColumnTypeUUID:
c.Data = new(ColUUID)
- case ColumnTypeArray.Sub(ColumnTypeUUID):
- c.Data = new(ColUUID).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUUID):
- c.Data = new(ColUUID).Nullable()
default:
switch t.Base() {
+ case ColumnTypeArray:
+ inner := new(ColAuto)
+ if err := inner.Infer(t.Elem()); err != nil {
+ return errors.Wrap(err, "array")
+ }
+ innerValue := reflect.ValueOf(inner.Data)
+ arrayMethod := innerValue.MethodByName("Array")
+ if arrayMethod.IsValid() && arrayMethod.Type().NumOut() == 1 {
+ if col, ok := arrayMethod.Call(nil)[0].Interface().(Column); ok {
+ c.Data = col
+ c.DataType = t
+ return nil
+ }
+ }
+ case ColumnTypeNullable:
+ inner := new(ColAuto)
+ if err := inner.Infer(t.Elem()); err != nil {
+ return errors.Wrap(err, "nullable")
+ }
+ innerValue := reflect.ValueOf(inner.Data)
+ nullableMethod := innerValue.MethodByName("Nullable")
+ if nullableMethod.IsValid() && nullableMethod.Type().NumOut() == 1 {
+ if col, ok := nullableMethod.Call(nil)[0].Interface().(Column); ok {
+ c.Data = col
+ c.DataType = t
+ return nil
+ }
+ }
+ case ColumnTypeLowCardinality:
+ inner := new(ColAuto)
+ if err := inner.Infer(t.Elem()); err != nil {
+ return errors.Wrap(err, "low cardinality")
+ }
+ innerValue := reflect.ValueOf(inner.Data)
+ lowCardinalityMethod := innerValue.MethodByName("LowCardinality")
+ if lowCardinalityMethod.IsValid() && lowCardinalityMethod.Type().NumOut() == 1 {
+ if col, ok := lowCardinalityMethod.Call(nil)[0].Interface().(Column); ok {
+ c.Data = col
+ c.DataType = t
+ return nil
+ }
+ }
case ColumnTypeDateTime:
v := new(ColDateTime)
if err := v.Infer(t); err != nil {
@@ -74,6 +102,49 @@ func (c *ColAuto) Infer(t ColumnType) error {
c.Data = v
c.DataType = t
return nil
+ case ColumnTypeDecimal:
+ var prec int
+ precStr, _, _ := strings.Cut(string(t.Elem()), ",")
+ if precStr != "" {
+ var err error
+ precStr = strings.TrimSpace(precStr)
+ prec, err = strconv.Atoi(precStr)
+ if err != nil {
+ return errors.Wrap(err, "decimal")
+ }
+ } else {
+ prec = 10
+ }
+ switch {
+ case prec >= 1 && prec < 10:
+ c.Data = new(ColDecimal32)
+ case prec >= 10 && prec < 19:
+ c.Data = new(ColDecimal64)
+ case prec >= 19 && prec < 39:
+ c.Data = new(ColDecimal128)
+ case prec >= 39 && prec < 77:
+ c.Data = new(ColDecimal256)
+ default:
+ return errors.Errorf("decimal precision %d out of range", prec)
+ }
+ c.DataType = t
+ return nil
+ case ColumnTypeDecimal32:
+ c.Data = new(ColDecimal32)
+ c.DataType = t
+ return nil
+ case ColumnTypeDecimal64:
+ c.Data = new(ColDecimal64)
+ c.DataType = t
+ return nil
+ case ColumnTypeDecimal128:
+ c.Data = new(ColDecimal128)
+ c.DataType = t
+ return nil
+ case ColumnTypeDecimal256:
+ c.Data = new(ColDecimal256)
+ c.DataType = t
+ return nil
case ColumnTypeEnum8, ColumnTypeEnum16:
v := new(ColEnum)
if err := v.Infer(t); err != nil {
@@ -122,3 +193,7 @@ func (c ColAuto) Reset() {
func (c ColAuto) EncodeColumn(b *Buffer) {
c.Data.EncodeColumn(b)
}
+
+func (c ColAuto) WriteColumn(w *Writer) {
+ c.Data.WriteColumn(w)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go
index 70928c6..b297d92 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go
@@ -4,154 +4,54 @@ package proto
func inferGenerated(t ColumnType) Column {
switch t {
- case ColumnTypeArray.Sub(ColumnTypeFloat32):
- return new(ColFloat32).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFloat32):
- return new(ColFloat32).Nullable()
case ColumnTypeFloat32:
return new(ColFloat32)
- case ColumnTypeArray.Sub(ColumnTypeFloat64):
- return new(ColFloat64).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFloat64):
- return new(ColFloat64).Nullable()
case ColumnTypeFloat64:
return new(ColFloat64)
- case ColumnTypeArray.Sub(ColumnTypeIPv4):
- return new(ColIPv4).Array()
- case ColumnTypeNullable.Sub(ColumnTypeIPv4):
- return new(ColIPv4).Nullable()
case ColumnTypeIPv4:
return new(ColIPv4)
- case ColumnTypeArray.Sub(ColumnTypeIPv6):
- return new(ColIPv6).Array()
- case ColumnTypeNullable.Sub(ColumnTypeIPv6):
- return new(ColIPv6).Nullable()
case ColumnTypeIPv6:
return new(ColIPv6)
- case ColumnTypeArray.Sub(ColumnTypeDate):
- return new(ColDate).Array()
- case ColumnTypeNullable.Sub(ColumnTypeDate):
- return new(ColDate).Nullable()
case ColumnTypeDate:
return new(ColDate)
- case ColumnTypeArray.Sub(ColumnTypeDate32):
- return new(ColDate32).Array()
- case ColumnTypeNullable.Sub(ColumnTypeDate32):
- return new(ColDate32).Nullable()
case ColumnTypeDate32:
return new(ColDate32)
- case ColumnTypeArray.Sub(ColumnTypeInt8):
- return new(ColInt8).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt8):
- return new(ColInt8).Nullable()
case ColumnTypeInt8:
return new(ColInt8)
- case ColumnTypeArray.Sub(ColumnTypeUInt8):
- return new(ColUInt8).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt8):
- return new(ColUInt8).Nullable()
case ColumnTypeUInt8:
return new(ColUInt8)
- case ColumnTypeArray.Sub(ColumnTypeInt16):
- return new(ColInt16).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt16):
- return new(ColInt16).Nullable()
case ColumnTypeInt16:
return new(ColInt16)
- case ColumnTypeArray.Sub(ColumnTypeUInt16):
- return new(ColUInt16).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt16):
- return new(ColUInt16).Nullable()
case ColumnTypeUInt16:
return new(ColUInt16)
- case ColumnTypeArray.Sub(ColumnTypeInt32):
- return new(ColInt32).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt32):
- return new(ColInt32).Nullable()
case ColumnTypeInt32:
return new(ColInt32)
- case ColumnTypeArray.Sub(ColumnTypeUInt32):
- return new(ColUInt32).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt32):
- return new(ColUInt32).Nullable()
case ColumnTypeUInt32:
return new(ColUInt32)
- case ColumnTypeArray.Sub(ColumnTypeInt64):
- return new(ColInt64).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt64):
- return new(ColInt64).Nullable()
case ColumnTypeInt64:
return new(ColInt64)
- case ColumnTypeArray.Sub(ColumnTypeUInt64):
- return new(ColUInt64).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt64):
- return new(ColUInt64).Nullable()
case ColumnTypeUInt64:
return new(ColUInt64)
- case ColumnTypeArray.Sub(ColumnTypeInt128):
- return new(ColInt128).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt128):
- return new(ColInt128).Nullable()
case ColumnTypeInt128:
return new(ColInt128)
- case ColumnTypeArray.Sub(ColumnTypeUInt128):
- return new(ColUInt128).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt128):
- return new(ColUInt128).Nullable()
case ColumnTypeUInt128:
return new(ColUInt128)
- case ColumnTypeArray.Sub(ColumnTypeInt256):
- return new(ColInt256).Array()
- case ColumnTypeNullable.Sub(ColumnTypeInt256):
- return new(ColInt256).Nullable()
case ColumnTypeInt256:
return new(ColInt256)
- case ColumnTypeArray.Sub(ColumnTypeUInt256):
- return new(ColUInt256).Array()
- case ColumnTypeNullable.Sub(ColumnTypeUInt256):
- return new(ColUInt256).Nullable()
case ColumnTypeUInt256:
return new(ColUInt256)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("8")):
- return new(ColFixedStr8).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("8")):
- return new(ColFixedStr8).Nullable()
case ColumnTypeFixedString.With("8"):
return new(ColFixedStr8)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("16")):
- return new(ColFixedStr16).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("16")):
- return new(ColFixedStr16).Nullable()
case ColumnTypeFixedString.With("16"):
return new(ColFixedStr16)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("32")):
- return new(ColFixedStr32).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("32")):
- return new(ColFixedStr32).Nullable()
case ColumnTypeFixedString.With("32"):
return new(ColFixedStr32)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("64")):
- return new(ColFixedStr64).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("64")):
- return new(ColFixedStr64).Nullable()
case ColumnTypeFixedString.With("64"):
return new(ColFixedStr64)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("128")):
- return new(ColFixedStr128).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("128")):
- return new(ColFixedStr128).Nullable()
case ColumnTypeFixedString.With("128"):
return new(ColFixedStr128)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("256")):
- return new(ColFixedStr256).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("256")):
- return new(ColFixedStr256).Nullable()
case ColumnTypeFixedString.With("256"):
return new(ColFixedStr256)
- case ColumnTypeArray.Sub(ColumnTypeFixedString.With("512")):
- return new(ColFixedStr512).Array()
- case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("512")):
- return new(ColFixedStr512).Nullable()
case ColumnTypeFixedString.With("512"):
return new(ColFixedStr512)
default:
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go
index 3e998e4..38cc73d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go
@@ -42,3 +42,11 @@ func (c *ColBool) DecodeColumn(r *Reader, rows int) error {
*c = v
return nil
}
+
+// WriteColumn encodes ColBool rows to *Writer.
+func (c ColBool) WriteColumn(w *Writer) {
+ if len(c) == 0 {
+ return
+ }
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go
index 92cac70..c42966e 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go
@@ -34,3 +34,13 @@ func (c *ColBool) DecodeColumn(r *Reader, rows int) error {
}
return nil
}
+
+// WriteColumn writes Bool rows to *Writer.
+func (c ColBool) WriteColumn(w *Writer) {
+ if len(c) == 0 {
+ return
+ }
+ s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103
+ src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go
index 5bf75b3..4483739 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go
@@ -20,7 +20,7 @@ func (c ColDate) Row(i int) time.Time {
return c[i].Time()
}
-// LowCardinality returns LowCardinality for Enum8 .
+// LowCardinality returns LowCardinality for Enum8.
func (c *ColDate) LowCardinality() *ColLowCardinality[time.Time] {
return &ColLowCardinality[time.Time]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go
index 38f1a91..8daa68b 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go
@@ -20,7 +20,7 @@ func (c ColDate32) Row(i int) time.Time {
return c[i].Time()
}
-// LowCardinality returns LowCardinality for Enum8 .
+// LowCardinality returns LowCardinality for Enum8.
func (c *ColDate32) LowCardinality() *ColLowCardinality[time.Time] {
return &ColLowCardinality[time.Time]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go
index 7e6ac3d..3a1f640 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDate32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDate32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go
index 2690a31..f4de460 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDate32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDate32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go
index 49bb89b..7b9cfc3 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDate) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDate) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go
index 980d8b4..022a714 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDate) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDate) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 16 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go
index 4243f2b..f8f8d97 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go
@@ -61,6 +61,10 @@ func (c ColDateTime) Row(i int) time.Time {
return c.Data[i].Time().In(c.loc())
}
+func (c *ColDateTime) AppendRaw(v DateTime) {
+ c.Data = append(c.Data, v)
+}
+
func (c *ColDateTime) Append(v time.Time) {
c.Data = append(c.Data, ToDateTime(v))
}
@@ -75,7 +79,7 @@ func (c *ColDateTime) AppendArr(vs []time.Time) {
c.Data = append(c.Data, dates...)
}
-// LowCardinality returns LowCardinality for Enum8 .
+// LowCardinality returns LowCardinality for Enum8.
func (c *ColDateTime) LowCardinality() *ColLowCardinality[time.Time] {
return &ColLowCardinality[time.Time]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go
index f4d96a4..12506e4 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go
@@ -61,11 +61,10 @@ func (c *ColDateTime64) Infer(t ColumnType) error {
if elem == "" {
return errors.Errorf("invalid DateTime64: no elements in %q", t)
}
- elems := strings.SplitN(elem, ",", 2)
- for i := range elems {
- elems[i] = strings.Trim(elems[i], `' `)
- }
- n, err := strconv.ParseUint(elems[0], 10, 8)
+ pStr, locStr, hasloc := strings.Cut(elem, ",")
+ pStr = strings.Trim(pStr, `' `)
+ locStr = strings.Trim(locStr, `' `)
+ n, err := strconv.ParseUint(pStr, 10, 8)
if err != nil {
return errors.Wrap(err, "parse precision")
}
@@ -75,8 +74,8 @@ func (c *ColDateTime64) Infer(t ColumnType) error {
}
c.Precision = p
c.PrecisionSet = true
- if len(elems) > 1 {
- loc, err := time.LoadLocation(elems[1])
+ if hasloc {
+ loc, err := time.LoadLocation(locStr)
if err != nil {
return errors.Wrap(err, "invalid location")
}
@@ -126,6 +125,10 @@ func (c ColDateTime64) Raw() *ColDateTime64Raw {
return &ColDateTime64Raw{ColDateTime64: c}
}
+func (c *ColDateTime64) Nullable() *ColNullable[time.Time] {
+ return &ColNullable[time.Time]{Values: c}
+}
+
func (c *ColDateTime64) Array() *ColArr[time.Time] {
return &ColArr[time.Time]{Data: c}
}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go
index ccff09d..69ac2f8 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDateTime64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDateTime64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go
index 4eeeaf5..22e5052 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDateTime64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDateTime64) WriteColumn(w *Writer) {
+ v := c.Data
+ if len(v) == 0 {
+ return
+ }
+ const size = 64 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go
index 20e9aad..2eca6eb 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDateTime) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDateTime) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go
index 40a056c..b421475 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDateTime) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDateTime) WriteColumn(w *Writer) {
+ v := c.Data
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go
index 18811ac..775acbb 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go
@@ -42,7 +42,7 @@ func (c *ColDecimal128) AppendArr(vs []Decimal128) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Decimal128 .
+// LowCardinality returns LowCardinality for Decimal128.
func (c *ColDecimal128) LowCardinality() *ColLowCardinality[Decimal128] {
return &ColLowCardinality[Decimal128]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go
index 58c02eb..fa8498d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDecimal128) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDecimal128) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go
index 1b2fe12..9a4070f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDecimal128) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDecimal128) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 128 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go
index ad96b27..7bb2ffa 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go
@@ -42,7 +42,7 @@ func (c *ColDecimal256) AppendArr(vs []Decimal256) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Decimal256 .
+// LowCardinality returns LowCardinality for Decimal256.
func (c *ColDecimal256) LowCardinality() *ColLowCardinality[Decimal256] {
return &ColLowCardinality[Decimal256]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go
index 301b7b1..d7b37f7 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDecimal256) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDecimal256) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go
index b0d694e..57419fb 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDecimal256) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDecimal256) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 256 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go
index 2c4f4ea..e26ec6f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go
@@ -42,7 +42,7 @@ func (c *ColDecimal32) AppendArr(vs []Decimal32) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Decimal32 .
+// LowCardinality returns LowCardinality for Decimal32.
func (c *ColDecimal32) LowCardinality() *ColLowCardinality[Decimal32] {
return &ColLowCardinality[Decimal32]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go
index 44cb9f7..9935f75 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDecimal32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDecimal32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go
index eaed3df..0784525 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDecimal32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDecimal32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go
index c37ffcd..bfb06b9 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go
@@ -42,7 +42,7 @@ func (c *ColDecimal64) AppendArr(vs []Decimal64) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Decimal64 .
+// LowCardinality returns LowCardinality for Decimal64.
func (c *ColDecimal64) LowCardinality() *ColLowCardinality[Decimal64] {
return &ColLowCardinality[Decimal64]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go
index a0934c6..9bb72cc 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColDecimal64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColDecimal64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go
index f5ba1b2..c4824fc 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColDecimal64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColDecimal64) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 64 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go
index f4af963..f798ec9 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go
@@ -63,14 +63,12 @@ func (e *ColEnum) parse(t ColumnType) error {
for _, elem := range strings.Split(elements, ",") {
def := strings.TrimSpace(elem)
// 'hello' = 1
- parts := strings.SplitN(def, "=", 2)
- if len(parts) != 2 {
+ left, right, hascomma := strings.Cut(def, "=")
+ if !hascomma {
return errors.Errorf("bad enum definition %q", def)
}
- var (
- left = strings.TrimSpace(parts[0]) // 'hello'
- right = strings.TrimSpace(parts[1]) // 1
- )
+ left = strings.TrimSpace(left) // 'hello'
+ right = strings.TrimSpace(right) // 1
idx, err := strconv.Atoi(right)
if err != nil {
return errors.Errorf("bad right side of definition %q", right)
@@ -169,4 +167,8 @@ func (e *ColEnum) EncodeColumn(b *Buffer) {
e.raw().EncodeColumn(b)
}
+func (e *ColEnum) WriteColumn(w *Writer) {
+ e.raw().WriteColumn(w)
+}
+
func (e *ColEnum) Type() ColumnType { return e.t }
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go
index 3f99c64..406e232 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go
@@ -42,7 +42,7 @@ func (c *ColEnum16) AppendArr(vs []Enum16) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Enum16 .
+// LowCardinality returns LowCardinality for Enum16.
func (c *ColEnum16) LowCardinality() *ColLowCardinality[Enum16] {
return &ColLowCardinality[Enum16]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go
index bf3b012..11b3ff0 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColEnum16) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColEnum16) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go
index 5275701..36bcabf 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColEnum16) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColEnum16) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 16 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go
index a063eaf..c9c3e43 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go
@@ -42,7 +42,7 @@ func (c *ColEnum8) AppendArr(vs []Enum8) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Enum8 .
+// LowCardinality returns LowCardinality for Enum8.
func (c *ColEnum8) LowCardinality() *ColLowCardinality[Enum8] {
return &ColLowCardinality[Enum8]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go
index edf5712..e8747e3 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go
@@ -42,3 +42,7 @@ func (c ColEnum8) EncodeColumn(b *Buffer) {
b.Buf[i+start] = uint8(v[i])
}
}
+
+func (c ColEnum8) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go
index 09e6fe2..7d3b380 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go
@@ -37,3 +37,12 @@ func (c ColEnum8) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColEnum8) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ src := *(*[]byte)(unsafe.Pointer(&v))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go
index 982cfa1..7ae816e 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go
@@ -86,6 +86,11 @@ func (c *ColFixedStr) DecodeColumn(r *Reader, rows int) error {
return nil
}
+// WriteColumn writes ColFixedStr rows to *Writer.
+func (c ColFixedStr) WriteColumn(w *Writer) {
+ w.ChainWrite(c.Buf)
+}
+
// Array returns new Array(FixedString).
func (c *ColFixedStr) Array() *ColArr[[]byte] {
return &ColArr[[]byte]{
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go
index cb76953..130b7be 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr128) AppendArr(vs [][128]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr128 .
+// LowCardinality returns LowCardinality for FixedStr128.
func (c *ColFixedStr128) LowCardinality() *ColLowCardinality[[128]byte] {
return &ColLowCardinality[[128]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go
index edf7f9c..3ecd33c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr128) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr128) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go
index 46ee96c..6cba7db 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr128) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr128) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 128
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go
index adfc2de..765a418 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr16) AppendArr(vs [][16]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr16 .
+// LowCardinality returns LowCardinality for FixedStr16.
func (c *ColFixedStr16) LowCardinality() *ColLowCardinality[[16]byte] {
return &ColLowCardinality[[16]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go
index 4a9313a..030b49c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr16) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr16) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go
index 5d0dbee..0b1e411 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr16) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr16) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 16
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go
index 1e2d955..da65261 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr256) AppendArr(vs [][256]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr256 .
+// LowCardinality returns LowCardinality for FixedStr256.
func (c *ColFixedStr256) LowCardinality() *ColLowCardinality[[256]byte] {
return &ColLowCardinality[[256]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go
index bb961f8..a4b8a5b 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr256) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr256) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go
index 277ac59..318908a 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr256) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr256) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 256
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go
index 90adba9..052bae3 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr32) AppendArr(vs [][32]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr32 .
+// LowCardinality returns LowCardinality for FixedStr32.
func (c *ColFixedStr32) LowCardinality() *ColLowCardinality[[32]byte] {
return &ColLowCardinality[[32]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go
index cdaf62d..7816cb5 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go
index 3777e5e..7e1386a 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go
index 09837fa..529ba31 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr512) AppendArr(vs [][512]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr512 .
+// LowCardinality returns LowCardinality for FixedStr512.
func (c *ColFixedStr512) LowCardinality() *ColLowCardinality[[512]byte] {
return &ColLowCardinality[[512]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go
index aa8ea31..b3cf574 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr512) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr512) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go
index 970ca0f..58bd05c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr512) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr512) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 512
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go
index 38849cc..ad31715 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr64) AppendArr(vs [][64]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr64 .
+// LowCardinality returns LowCardinality for FixedStr64.
func (c *ColFixedStr64) LowCardinality() *ColLowCardinality[[64]byte] {
return &ColLowCardinality[[64]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go
index 89c1f24..4af33c0 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go
index 62ec09e..34a1078 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr64) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 64
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go
index a58723e..a233df7 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go
@@ -42,7 +42,7 @@ func (c *ColFixedStr8) AppendArr(vs [][8]byte) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for FixedStr8 .
+// LowCardinality returns LowCardinality for FixedStr8.
func (c *ColFixedStr8) LowCardinality() *ColLowCardinality[[8]byte] {
return &ColLowCardinality[[8]byte]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go
index 086ea6f..d830f16 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColFixedStr8) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFixedStr8) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go
index 9991c06..56cf28f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFixedStr8) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFixedStr8) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go
index 7031f11..01a35f3 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go
@@ -42,7 +42,7 @@ func (c *ColFloat32) AppendArr(vs []float32) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Float32 .
+// LowCardinality returns LowCardinality for Float32.
func (c *ColFloat32) LowCardinality() *ColLowCardinality[float32] {
return &ColLowCardinality[float32]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go
index f400aef..dde651f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go
@@ -54,3 +54,7 @@ func (c ColFloat32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFloat32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go
index 2ded35f..f1d20f4 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFloat32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFloat32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go
index c210eb8..4df408c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go
@@ -42,7 +42,7 @@ func (c *ColFloat64) AppendArr(vs []float64) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Float64 .
+// LowCardinality returns LowCardinality for Float64.
func (c *ColFloat64) LowCardinality() *ColLowCardinality[float64] {
return &ColLowCardinality[float64]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go
index 68281ae..9bdaa81 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go
@@ -54,3 +54,7 @@ func (c ColFloat64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColFloat64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go
index f16fd39..16e2974 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColFloat64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColFloat64) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 64 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go
index 5e982c4..3ff6e76 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt128) AppendArr(vs []Int128) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int128 .
+// LowCardinality returns LowCardinality for Int128.
func (c *ColInt128) LowCardinality() *ColLowCardinality[Int128] {
return &ColLowCardinality[Int128]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go
index 5902d3f..b38a58d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColInt128) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColInt128) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go
index c5862ff..8098501 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColInt128) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt128) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 128 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go
index 212801d..d2d0795 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt16) AppendArr(vs []int16) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int16 .
+// LowCardinality returns LowCardinality for Int16.
func (c *ColInt16) LowCardinality() *ColLowCardinality[int16] {
return &ColLowCardinality[int16]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go
index 75523a4..2c85f10 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColInt16) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColInt16) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go
index 6ba5e50..b994fb4 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColInt16) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt16) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 16 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go
index 5d7454b..05c5073 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt256) AppendArr(vs []Int256) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int256 .
+// LowCardinality returns LowCardinality for Int256.
func (c *ColInt256) LowCardinality() *ColLowCardinality[Int256] {
return &ColLowCardinality[Int256]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go
index 0b9f8f1..9c0589d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColInt256) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColInt256) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go
index 2433bc9..3c49ead 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColInt256) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt256) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 256 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go
index 46b0958..63d4cdd 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt32) AppendArr(vs []int32) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int32 .
+// LowCardinality returns LowCardinality for Int32.
func (c *ColInt32) LowCardinality() *ColLowCardinality[int32] {
return &ColLowCardinality[int32]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go
index 52f78c1..54c6a2c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColInt32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColInt32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go
index b2e1025..8236c9d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColInt32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go
index 4c8875c..01061c0 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt64) AppendArr(vs []int64) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int64 .
+// LowCardinality returns LowCardinality for Int64.
func (c *ColInt64) LowCardinality() *ColLowCardinality[int64] {
return &ColLowCardinality[int64]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go
index 400367d..e2dba72 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColInt64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColInt64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go
index 5c6f265..aa15d1e 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColInt64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt64) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 64 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go
index 98a71a2..32f3003 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go
@@ -42,7 +42,7 @@ func (c *ColInt8) AppendArr(vs []int8) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for Int8 .
+// LowCardinality returns LowCardinality for Int8.
func (c *ColInt8) LowCardinality() *ColLowCardinality[int8] {
return &ColLowCardinality[int8]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go
index a79459d..343b613 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go
@@ -42,3 +42,7 @@ func (c ColInt8) EncodeColumn(b *Buffer) {
b.Buf[i+start] = uint8(v[i])
}
}
+
+func (c ColInt8) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go
index 1c62c7d..daa6715 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go
@@ -37,3 +37,12 @@ func (c ColInt8) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColInt8) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ src := *(*[]byte)(unsafe.Pointer(&v))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go
index 57bb2e3..c9ecb4c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_interval.go
@@ -110,3 +110,7 @@ func (c *ColInterval) Reset() {
func (c ColInterval) EncodeColumn(b *Buffer) {
c.Values.EncodeColumn(b)
}
+
+func (c ColInterval) WriteColumn(w *Writer) {
+ c.Values.WriteColumn(w)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go
index 4c7a0bc..7559e79 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go
@@ -42,7 +42,7 @@ func (c *ColIPv4) AppendArr(vs []IPv4) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for IPv4 .
+// LowCardinality returns LowCardinality for IPv4.
func (c *ColIPv4) LowCardinality() *ColLowCardinality[IPv4] {
return &ColLowCardinality[IPv4]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go
index 8b0b790..cd687b2 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColIPv4) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColIPv4) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go
index 5fc0b7c..1039fc1 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColIPv4) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColIPv4) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go
index 5907bd7..cff4ee9 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go
@@ -42,7 +42,7 @@ func (c *ColIPv6) AppendArr(vs []IPv6) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for IPv6 .
+// LowCardinality returns LowCardinality for IPv6.
func (c *ColIPv6) LowCardinality() *ColLowCardinality[IPv6] {
return &ColLowCardinality[IPv6]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go
index 9a5870d..a2d317f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColIPv6) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColIPv6) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go
index 5650b49..e4fe0a0 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColIPv6) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColIPv6) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 128 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go
new file mode 100644
index 0000000..1d23a3a
--- /dev/null
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_json_str.go
@@ -0,0 +1,158 @@
+package proto
+
+import (
+ "github.com/go-faster/errors"
+)
+
+const JSONStringSerializationVersion uint64 = 1
+
+// ColJSONStr represents String column.
+//
+// Use ColJSONBytes for []bytes ColumnOf implementation.
+type ColJSONStr struct {
+ Str ColStr
+}
+
+// Append string to column.
+func (c *ColJSONStr) Append(v string) {
+ c.Str.Append(v)
+}
+
+// AppendBytes append byte slice as string to column.
+func (c *ColJSONStr) AppendBytes(v []byte) {
+ c.Str.AppendBytes(v)
+}
+
+func (c *ColJSONStr) AppendArr(v []string) {
+ c.Str.AppendArr(v)
+}
+
+// Compile-time assertions for ColJSONStr.
+var (
+ _ ColInput = ColJSONStr{}
+ _ ColResult = (*ColJSONStr)(nil)
+ _ Column = (*ColJSONStr)(nil)
+ _ ColumnOf[string] = (*ColJSONStr)(nil)
+ _ Arrayable[string] = (*ColJSONStr)(nil)
+)
+
+// Type returns ColumnType of JSON.
+func (ColJSONStr) Type() ColumnType {
+ return ColumnTypeJSON
+}
+
+// Rows returns count of rows in column.
+func (c ColJSONStr) Rows() int {
+ return c.Str.Rows()
+}
+
+// Reset resets data in row, preserving capacity for efficiency.
+func (c *ColJSONStr) Reset() {
+ c.Str.Reset()
+}
+
+// EncodeColumn encodes String rows to *Buffer.
+func (c ColJSONStr) EncodeColumn(b *Buffer) {
+ b.PutUInt64(JSONStringSerializationVersion)
+
+ c.Str.EncodeColumn(b)
+}
+
+// WriteColumn writes JSON rows to *Writer.
+func (c ColJSONStr) WriteColumn(w *Writer) {
+ w.ChainBuffer(func(b *Buffer) {
+ b.PutUInt64(JSONStringSerializationVersion)
+ })
+
+ c.Str.WriteColumn(w)
+}
+
+// ForEach calls f on each string from column.
+func (c ColJSONStr) ForEach(f func(i int, s string) error) error {
+ return c.Str.ForEach(f)
+}
+
+// First returns the first row of the column.
+func (c ColJSONStr) First() string {
+ return c.Str.First()
+}
+
+// Row returns row with number i.
+func (c ColJSONStr) Row(i int) string {
+ return c.Str.Row(i)
+}
+
+// RowBytes returns row with number i as byte slice.
+func (c ColJSONStr) RowBytes(i int) []byte {
+ return c.Str.RowBytes(i)
+}
+
+// ForEachBytes calls f on each string from column as byte slice.
+func (c ColJSONStr) ForEachBytes(f func(i int, b []byte) error) error {
+ return c.Str.ForEachBytes(f)
+}
+
+// DecodeColumn decodes String rows from *Reader.
+func (c *ColJSONStr) DecodeColumn(r *Reader, rows int) error {
+ jsonSerializationVersion, err := r.UInt64()
+ if err != nil {
+ return errors.Wrap(err, "failed to read json serialization version")
+ }
+
+ if jsonSerializationVersion != JSONStringSerializationVersion {
+ return errors.Errorf("received invalid JSON string serialization version %d. Setting \"output_format_native_write_json_as_string\" must be enabled.", jsonSerializationVersion)
+ }
+
+ return c.Str.DecodeColumn(r, rows)
+}
+
+// LowCardinality returns LowCardinality(JSON).
+func (c *ColJSONStr) LowCardinality() *ColLowCardinality[string] {
+ return c.Str.LowCardinality()
+}
+
+// Array is helper that creates Array(JSON).
+func (c *ColJSONStr) Array() *ColArr[string] {
+ return c.Str.Array()
+}
+
+// Nullable is helper that creates Nullable(JSON).
+func (c *ColJSONStr) Nullable() *ColNullable[string] {
+ return c.Str.Nullable()
+}
+
+// ColJSONBytes is ColJSONStr wrapper to be ColumnOf for []byte.
+type ColJSONBytes struct {
+ ColJSONStr
+}
+
+// Row returns row with number i.
+func (c ColJSONBytes) Row(i int) []byte {
+ return c.RowBytes(i)
+}
+
+// Append byte slice to column.
+func (c *ColJSONBytes) Append(v []byte) {
+ c.AppendBytes(v)
+}
+
+// AppendArr append slice of byte slices to column.
+func (c *ColJSONBytes) AppendArr(v [][]byte) {
+ for _, s := range v {
+ c.Append(s)
+ }
+}
+
+// Array is helper that creates Array(JSON).
+func (c *ColJSONBytes) Array() *ColArr[[]byte] {
+ return &ColArr[[]byte]{
+ Data: c,
+ }
+}
+
+// Nullable is helper that creates Nullable(JSON).
+func (c *ColJSONBytes) Nullable() *ColNullable[[]byte] {
+ return &ColNullable[[]byte]{
+ Values: c,
+ }
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go
index ffed580..4471fb8 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go
@@ -230,6 +230,41 @@ func (c *ColLowCardinality[T]) EncodeColumn(b *Buffer) {
}
}
+func (c *ColLowCardinality[T]) WriteColumn(w *Writer) {
+ // Using pointer receiver as Prepare() is expected to be called before
+ // encoding.
+
+ if c.Rows() == 0 {
+ // Skipping encoding entirely.
+ return
+ }
+
+ w.ChainBuffer(func(b *Buffer) {
+ // Meta encodes whether reader should update
+ // low cardinality metadata and keys column type.
+ meta := cardinalityUpdateAll | int64(c.key)
+ b.PutInt64(meta)
+
+ // Writing index (dictionary).
+ b.PutInt64(int64(c.index.Rows()))
+ })
+ c.index.WriteColumn(w)
+
+ w.ChainBuffer(func(b *Buffer) {
+ b.PutInt64(int64(c.Rows()))
+ })
+ switch c.key {
+ case KeyUInt8:
+ c.keys8.WriteColumn(w)
+ case KeyUInt16:
+ c.keys16.WriteColumn(w)
+ case KeyUInt32:
+ c.keys32.WriteColumn(w)
+ case KeyUInt64:
+ c.keys64.WriteColumn(w)
+ }
+}
+
func (c *ColLowCardinality[T]) Reset() {
for k := range c.kv {
delete(c.kv, k)
@@ -286,17 +321,6 @@ func (c ColLowCardinality[T]) Rows() int {
// Prepare column for ingestion.
func (c *ColLowCardinality[T]) Prepare() error {
- // Select minimum possible size for key.
- if n := len(c.Values); n < math.MaxUint8 {
- c.key = KeyUInt8
- } else if n < math.MaxUint16 {
- c.key = KeyUInt16
- } else if uint32(n) < math.MaxUint32 {
- c.key = KeyUInt32
- } else {
- c.key = KeyUInt64
- }
-
// Allocate keys slice.
c.keys = append(c.keys[:0], make([]int, len(c.Values))...)
if c.kv == nil {
@@ -317,6 +341,17 @@ func (c *ColLowCardinality[T]) Prepare() error {
c.keys[i] = idx
}
+ // Select minimum possible size for key.
+ if n := last; n < math.MaxUint8 {
+ c.key = KeyUInt8
+ } else if n < math.MaxUint16 {
+ c.key = KeyUInt16
+ } else if uint32(n) < math.MaxUint32 {
+ c.key = KeyUInt32
+ } else {
+ c.key = KeyUInt64
+ }
+
// Fill key column with key indexes.
switch c.key {
case KeyUInt8:
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go
index 665dc20..9928634 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality_raw.go
@@ -155,3 +155,28 @@ func (c ColLowCardinalityRaw) EncodeColumn(b *Buffer) {
b.PutInt64(int64(k.Rows()))
k.EncodeColumn(b)
}
+
+func (c ColLowCardinalityRaw) WriteColumn(w *Writer) {
+ if c.Rows() == 0 {
+ // Skipping encoding entirely.
+ return
+ }
+
+ w.ChainBuffer(func(b *Buffer) {
+ // Meta encodes whether reader should update
+ // low cardinality metadata and keys column type.
+ meta := cardinalityUpdateAll | int64(c.Key)
+ b.PutInt64(meta)
+
+ // Writing index (dictionary).
+ b.PutInt64(int64(c.Index.Rows()))
+ })
+ c.Index.WriteColumn(w)
+
+ // Sequence of values as indexes in dictionary.
+ k := c.Keys()
+ w.ChainBuffer(func(b *Buffer) {
+ b.PutInt64(int64(k.Rows()))
+ })
+ k.WriteColumn(w)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go
index 90925fb..e27781d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go
@@ -164,6 +164,16 @@ func (c ColMap[K, V]) EncodeColumn(b *Buffer) {
c.Values.EncodeColumn(b)
}
+func (c ColMap[K, V]) WriteColumn(w *Writer) {
+ if c.Rows() == 0 {
+ return
+ }
+
+ c.Offsets.WriteColumn(w)
+ c.Keys.WriteColumn(w)
+ c.Values.WriteColumn(w)
+}
+
// Prepare ensures Preparable column propagation.
func (c ColMap[K, V]) Prepare() error {
if v, ok := c.Keys.(Preparable); ok {
@@ -181,18 +191,18 @@ func (c ColMap[K, V]) Prepare() error {
// Infer ensures Inferable column propagation.
func (c *ColMap[K, V]) Infer(t ColumnType) error {
- elems := strings.Split(string(t.Elem()), ",")
- if len(elems) != 2 {
+ keytype, valtype, hascomma := strings.Cut(string(t.Elem()), ",")
+ if !hascomma || strings.ContainsRune(valtype, ',') {
return errors.New("invalid map type")
}
if v, ok := c.Keys.(Inferable); ok {
- ct := ColumnType(strings.TrimSpace(elems[0]))
+ ct := ColumnType(strings.TrimSpace(keytype))
if err := v.Infer(ct); err != nil {
return errors.Wrap(err, "infer data")
}
}
if v, ok := c.Values.(Inferable); ok {
- ct := ColumnType(strings.TrimSpace(elems[1]))
+ ct := ColumnType(strings.TrimSpace(valtype))
if err := v.Infer(ct); err != nil {
return errors.Wrap(err, "infer data")
}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go
new file mode 100644
index 0000000..e9d83c5
--- /dev/null
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map_go123.go
@@ -0,0 +1,25 @@
+//go:build go1.23
+
+package proto
+
+import "iter"
+
+// RowRange returns a [iter.Seq2] iterator over i-th row.
+func (c ColMap[K, V]) RowRange(i int) iter.Seq2[K, V] {
+ var start int
+ end := int(c.Offsets[i])
+ if i > 0 {
+ start = int(c.Offsets[i-1])
+ }
+
+ return func(yield func(K, V) bool) {
+ for idx := start; idx < end; idx++ {
+ if !yield(
+ c.Keys.Row(idx),
+ c.Values.Row(idx),
+ ) {
+ return
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go
index 1a82509..d72eeba 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go
@@ -71,3 +71,7 @@ func (c ColNothing) EncodeColumn(b *Buffer) {
}
b.PutRaw(make([]byte, c))
}
+
+func (c ColNothing) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go
index 516245f..fd3615d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go
@@ -117,6 +117,12 @@ func (c ColNullable[T]) Row(i int) Nullable[T] {
}
}
+func (c *ColNullable[T]) Array() *ColArr[Nullable[T]] {
+ return &ColArr[Nullable[T]]{
+ Data: c,
+ }
+}
+
func (c *ColNullable[T]) Reset() {
c.Nulls.Reset()
c.Values.Reset()
@@ -127,6 +133,11 @@ func (c ColNullable[T]) EncodeColumn(b *Buffer) {
c.Values.EncodeColumn(b)
}
+func (c ColNullable[T]) WriteColumn(w *Writer) {
+ c.Nulls.WriteColumn(w)
+ c.Values.WriteColumn(w)
+}
+
func (c ColNullable[T]) IsElemNull(i int) bool {
if i < c.Rows() {
return c.Nulls[i] == boolTrue
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_point.go b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go
index 0e1549f..5d7834f 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_point.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_point.go
@@ -61,3 +61,8 @@ func (c ColPoint) EncodeColumn(b *Buffer) {
c.X.EncodeColumn(b)
c.Y.EncodeColumn(b)
}
+
+func (c ColPoint) WriteColumn(w *Writer) {
+ c.X.WriteColumn(w)
+ c.Y.WriteColumn(w)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go
index 325a17b..d56e357 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go
@@ -82,3 +82,17 @@ func (c *ColRawOf[X]) DecodeColumn(r *Reader, rows int) error {
}
return nil
}
+
+// WriteColumn write ColRawOf rows to *Writer.
+func (c ColRawOf[X]) WriteColumn(w *Writer) {
+ if len(c) == 0 {
+ return
+ }
+ var x X
+ size := unsafe.Sizeof(x) // #nosec G103
+ s := *(*slice)(unsafe.Pointer(&c)) // #nosec G103
+ s.Len *= size
+ s.Cap *= size
+ src := *(*[]byte)(unsafe.Pointer(&s)) // #nosec G103
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go
index 8f48ad7..9786f0e 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go
@@ -76,6 +76,20 @@ func (c ColStr) EncodeColumn(b *Buffer) {
}
}
+// WriteColumn writes String rows to *Writer.
+func (c ColStr) WriteColumn(w *Writer) {
+ buf := make([]byte, binary.MaxVarintLen64)
+ // Writing values from c.Buf directly might improve performance if [ColStr] contains a few rows of very long strings.
+ // However, most of the time it is quite opposite, so we copy data.
+ w.ChainBuffer(func(b *Buffer) {
+ for _, p := range c.Pos {
+ n := binary.PutUvarint(buf, uint64(p.End-p.Start))
+ b.PutRaw(buf[:n])
+ b.PutRaw(c.Buf[p.Start:p.End])
+ }
+ })
+}
+
// ForEach calls f on each string from column.
func (c ColStr) ForEach(f func(i int, s string) error) error {
return c.ForEachBytes(func(i int, b []byte) error {
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go
index ac7ad6c..7ee1bef 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_tuple.go
@@ -167,3 +167,9 @@ func (c ColTuple) EncodeColumn(b *Buffer) {
v.EncodeColumn(b)
}
}
+
+func (c ColTuple) WriteColumn(w *Writer) {
+ for _, v := range c {
+ v.WriteColumn(w)
+ }
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go
index e34f07e..58b83a3 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt128) AppendArr(vs []UInt128) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt128 .
+// LowCardinality returns LowCardinality for UInt128.
func (c *ColUInt128) LowCardinality() *ColLowCardinality[UInt128] {
return &ColLowCardinality[UInt128]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go
index bbe55dc..785cc9c 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColUInt128) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColUInt128) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go
index 5989b5f..4ee0aa0 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColUInt128) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColUInt128) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 128 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go
index 7bc8ba6..4d41de4 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt16) AppendArr(vs []uint16) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt16 .
+// LowCardinality returns LowCardinality for UInt16.
func (c *ColUInt16) LowCardinality() *ColLowCardinality[uint16] {
return &ColLowCardinality[uint16]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go
index 219f3a6..3d45fbe 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColUInt16) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColUInt16) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go
index d98d953..126c46e 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColUInt16) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColUInt16) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 16 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go
index b68a119..0c7d912 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt256) AppendArr(vs []UInt256) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt256 .
+// LowCardinality returns LowCardinality for UInt256.
func (c *ColUInt256) LowCardinality() *ColLowCardinality[UInt256] {
return &ColLowCardinality[UInt256]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go
index 68633e1..4dda222 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColUInt256) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColUInt256) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go
index 02488d3..df657cc 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColUInt256) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColUInt256) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 256 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go
index 41abca5..4f205d2 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt32) AppendArr(vs []uint32) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt32 .
+// LowCardinality returns LowCardinality for UInt32.
func (c *ColUInt32) LowCardinality() *ColLowCardinality[uint32] {
return &ColLowCardinality[uint32]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go
index 0bc7de9..11ebb11 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColUInt32) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColUInt32) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go
index 3ddfa76..fad9cb1 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColUInt32) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColUInt32) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 32 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go
index 4521cd4..f3471d1 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt64) AppendArr(vs []uint64) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt64 .
+// LowCardinality returns LowCardinality for UInt64.
func (c *ColUInt64) LowCardinality() *ColLowCardinality[uint64] {
return &ColLowCardinality[uint64]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go
index deea8a4..ada64d5 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go
@@ -53,3 +53,7 @@ func (c ColUInt64) EncodeColumn(b *Buffer) {
offset += size
}
}
+
+func (c ColUInt64) WriteColumn(w *Writer) {
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go
index 664f80f..bb73e18 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go
@@ -43,3 +43,18 @@ func (c ColUInt64) EncodeColumn(b *Buffer) {
dst := b.Buf[offset:]
copy(dst, src)
}
+
+func (c ColUInt64) WriteColumn(w *Writer) {
+ v := c
+ if len(v) == 0 {
+ return
+ }
+ const size = 64 / 8
+
+ s := *(*slice)(unsafe.Pointer(&v))
+ s.Len *= size
+ s.Cap *= size
+
+ src := *(*[]byte)(unsafe.Pointer(&s))
+ w.ChainWrite(src)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go
index 02c0516..e34a067 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go
@@ -42,7 +42,7 @@ func (c *ColUInt8) AppendArr(vs []uint8) {
*c = append(*c, vs...)
}
-// LowCardinality returns LowCardinality for UInt8 .
+// LowCardinality returns LowCardinality for UInt8.
func (c *ColUInt8) LowCardinality() *ColLowCardinality[uint8] {
return &ColLowCardinality[uint8]{
index: c,
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go
index ec5ff19..2acccc6 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_safe_gen.go
@@ -31,3 +31,7 @@ func (c ColUInt8) EncodeColumn(b *Buffer) {
}
b.Buf = append(b.Buf, v...)
}
+
+func (c ColUInt8) WriteColumn(w *Writer) {
+ w.ChainWrite([]byte(c))
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go
index 8de9408..d74508b 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go
@@ -34,3 +34,12 @@ func (c ColUUID) EncodeColumn(b *Buffer) {
}
bswap.Swap64(b.Buf) // BE <-> LE
}
+
+// WriteColumn encodes ColUUID rows to *Writer.
+func (c ColUUID) WriteColumn(w *Writer) {
+ if len(c) == 0 {
+ return
+ }
+ // Can't write UUID as-is: bswap is required.
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go
index 18fa73f..877bc4d 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go
@@ -47,3 +47,12 @@ func (c ColUUID) EncodeColumn(b *Buffer) {
copy(dst, src)
bswap.Swap64(dst) // BE <-> LE
}
+
+// WriteColumn encodes ColUUID rows to *Writer.
+func (c ColUUID) WriteColumn(w *Writer) {
+ if len(c) == 0 {
+ return
+ }
+ // Can't write UUID as-is: bswap is required.
+ w.ChainBuffer(c.EncodeColumn)
+}
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/column.go b/vendor/github.com/ClickHouse/ch-go/proto/column.go
index 1cdeb31..4f0dbff 100644
--- a/vendor/github.com/ClickHouse/ch-go/proto/column.go
+++ b/vendor/github.com/ClickHouse/ch-go/proto/column.go
@@ -2,6 +2,7 @@ package proto
import (
"fmt"
+ "strconv"
"strings"
"github.com/go-faster/errors"
@@ -12,6 +13,7 @@ type ColInput interface {
Type() ColumnType
Rows() int
EncodeColumn(b *Buffer)
+ WriteColumn(w *Writer)
}
// ColResult column.
@@ -73,8 +75,8 @@ func (c ColumnType) Base() ColumnType {
}
var (
v = string(c)
- start = strings.Index(v, "(")
- end = strings.LastIndex(v, ")")
+ start = strings.IndexByte(v, '(')
+ end = strings.LastIndexByte(v, ')')
)
if start <= 0 || end <= 0 || end < start {
return c
@@ -82,30 +84,64 @@ func (c ColumnType) Base() ColumnType {
return c[:start]
}
+// reduces Decimal(P, ...) to Decimal32/Decimal64/Decimal128/Decimal256
+// returns c if any errors occur during conversion
+func (c ColumnType) decimalDowncast() ColumnType {
+ if c.Base() != ColumnTypeDecimal {
+ return c
+ }
+ elem := c.Elem()
+ precStr, _, _ := strings.Cut(string(elem), ",")
+ precStr = strings.TrimSpace(precStr)
+ prec, err := strconv.Atoi(precStr)
+ if err != nil {
+ return c
+ }
+ switch {
+ case prec < 10:
+ return ColumnTypeDecimal32
+ case prec < 19:
+ return ColumnTypeDecimal64
+ case prec < 39:
+ return ColumnTypeDecimal128
+ case prec < 77:
+ return ColumnTypeDecimal256
+ default:
+ return c
+ }
+}
+
// Conflicts reports whether two types conflict.
func (c ColumnType) Conflicts(b ColumnType) bool {
if c == b {
return false
}
- {
- a := c
- if b.Base() == ColumnTypeEnum8 || b.Base() == ColumnTypeEnum16 {
- a, b = b, a
- }
- switch {
- case a.Base() == ColumnTypeEnum8 && b == ColumnTypeInt8:
- return false
- case a.Base() == ColumnTypeEnum16 && b == ColumnTypeInt16:
- return false
- }
+ cBase := c.Base()
+ bBase := b.Base()
+ if (cBase == ColumnTypeEnum8 && b == ColumnTypeInt8) ||
+ (cBase == ColumnTypeEnum16 && b == ColumnTypeInt16) ||
+ (bBase == ColumnTypeEnum8 && c == ColumnTypeInt8) ||
+ (bBase == ColumnTypeEnum16 && c == ColumnTypeInt16) {
+ return false
+ }
+ if cBase == ColumnTypeDecimal || bBase == ColumnTypeDecimal {
+ return c.decimalDowncast() != b.decimalDowncast()
}
- if c.Base() != b.Base() {
+
+ if cBase != bBase {
return true
}
+ switch cBase {
+ case ColumnTypeEnum8, ColumnTypeEnum16:
+ return false
+ }
+
if c.normalizeCommas() == b.normalizeCommas() {
return false
}
- switch c.Base() {
+ switch cBase {
+ case ColumnTypeArray, ColumnTypeNullable, ColumnTypeLowCardinality:
+ return c.Elem().Conflicts(b.Elem())
case ColumnTypeDateTime, ColumnTypeDateTime64:
// TODO(ernado): improve check
return false
@@ -149,8 +185,8 @@ func (c ColumnType) Elem() ColumnType {
}
var (
v = string(c)
- start = strings.Index(v, "(")
- end = strings.LastIndex(v, ")")
+ start = strings.IndexByte(v, '(')
+ end = strings.LastIndexByte(v, ')')
)
if start <= 0 || end <= 0 || end < start {
// No element.
@@ -206,6 +242,7 @@ const (
ColumnTypeBool ColumnType = "Bool"
ColumnTypeTuple ColumnType = "Tuple"
ColumnTypeNullable ColumnType = "Nullable"
+ ColumnTypeDecimal ColumnType = "Decimal"
ColumnTypeDecimal32 ColumnType = "Decimal32"
ColumnTypeDecimal64 ColumnType = "Decimal64"
ColumnTypeDecimal128 ColumnType = "Decimal128"
@@ -213,6 +250,7 @@ const (
ColumnTypePoint ColumnType = "Point"
ColumnTypeInterval ColumnType = "Interval"
ColumnTypeNothing ColumnType = "Nothing"
+ ColumnTypeJSON ColumnType = "JSON"
)
// colWrap wraps Column with type t.
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/writer.go b/vendor/github.com/ClickHouse/ch-go/proto/writer.go
new file mode 100644
index 0000000..c3ba4c4
--- /dev/null
+++ b/vendor/github.com/ClickHouse/ch-go/proto/writer.go
@@ -0,0 +1,73 @@
+package proto
+
+import (
+ "io"
+ "net"
+)
+
+// Writer is a column writer.
+//
+// It helps to reduce memory footprint by writing column using vector I/O.
+type Writer struct {
+ conn io.Writer
+
+ buf *Buffer
+ bufOffset int
+ needCut bool
+
+ vec net.Buffers
+}
+
+// NewWriter creates new [Writer].
+func NewWriter(conn io.Writer, buf *Buffer) *Writer {
+ w := &Writer{
+ conn: conn,
+ buf: buf,
+ vec: make(net.Buffers, 0, 16),
+ }
+ return w
+}
+
+// ChainWrite adds buffer to the vector to write later.
+//
+// Passed byte slice may be captured until [Writer.Flush] is called.
+func (w *Writer) ChainWrite(data []byte) {
+ w.cutBuffer()
+ w.vec = append(w.vec, data)
+}
+
+// ChainBuffer creates a temporary buffer and adds it to the vector to write later.
+//
+// Data is not written immediately, call [Writer.Flush] to flush data.
+//
+// NB: do not retain buffer.
+func (w *Writer) ChainBuffer(cb func(*Buffer)) {
+ cb(w.buf)
+}
+
+func (w *Writer) cutBuffer() {
+ newOffset := len(w.buf.Buf)
+ data := w.buf.Buf[w.bufOffset:newOffset:newOffset]
+ if len(data) == 0 {
+ return
+ }
+ w.bufOffset = newOffset
+ w.vec = append(w.vec, data)
+}
+
+func (w *Writer) reset() {
+ w.bufOffset = 0
+ w.needCut = false
+ w.buf.Reset()
+ // Do not hold references, to avoid memory leaks.
+ clear(w.vec)
+ w.vec = w.vec[:0]
+}
+
+// Flush flushes all data to writer.
+func (w *Writer) Flush() (n int64, err error) {
+ w.cutBuffer()
+ n, err = w.vec.WriteTo(w.conn)
+ w.reset()
+ return n, err
+}
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index 8969526..7c7f0c6 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -1,6 +1,7 @@
# A minimal logging API for Go
[](https://pkg.go.dev/github.com/go-logr/logr)
+[](https://goreportcard.com/report/github.com/go-logr/logr)
[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index a229538..4528059 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,5 +1,5 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
before:
hooks:
- ./gen.sh
@@ -99,7 +99,7 @@ archives:
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ .Tag }}-next"
+ version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 1f72cdd..de264c8 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,27 @@ This package provides various compression algorithms.
# changelog
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+ * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+ * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+ * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+ * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+ * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+ * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+ * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+ * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+ * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+ * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+ * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+ * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+ * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -55,6 +76,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+
+ See changes to v1.16.x
+
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
@@ -77,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
- * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -93,6 +118,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
See changes to v1.15.x
@@ -131,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
- * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -334,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -513,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
@@ -560,6 +586,8 @@ the stateless compress described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index cc05d0f..0c7dd4f 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 54bd08b..0f56b02 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
- fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
- fmt.Fprintf(w, "%d errros, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 2aa6a95..2754bac 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
- // length emitted down below is is a little lower (at 60 = 64 - 4), because
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 9f17ce6..9c28840 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("Compression modes: 0b%b", compMode)
}
+ if compMode&3 != 0 {
+ return errors.New("corrupt block: reserved bits not zero")
+ }
for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
@@ -595,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
- println("Reading table for", tableIndex(i))
+ if debugDecoder {
+ println("Reading table for", tableIndex(i))
+ }
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 2cfe925..32a7f40 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
return nil
}
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(length)
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, val)
+}
+
// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
if len(data) > maxSequences || len(data) < 2 {
@@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.sequences) == 0 {
return b.encodeLits(b.literals, rawAllLits)
}
+ if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+ // Check common RLE cases.
+ seq := b.sequences[0]
+ if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+ // Offset == 1 and 0 or 1 literals.
+ b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+ return nil
+ }
+ }
+
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f04aaa2..bbca172 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -82,7 +82,7 @@ var (
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
initPredefined()
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 8d5567f..b7b8316 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
@@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if offset == 0 {
continue
}
+ if int(offset) >= len(o.History) {
+ continue
+ }
if offset > 3 {
newOffsets[offset-3]++
} else {
@@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
@@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
fakeLength += v
hist[i] = uint32(v)
}
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 87f4287..4613724 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
break
}
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 20d25b0..84a79fd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
e.cur = e.maxMatchOff
break
}
-
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -168,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -199,12 +210,12 @@ encodeLoop:
// Index match start+1 (long) -> s - 1
index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -230,9 +241,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -259,11 +270,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -697,9 +708,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -727,12 +738,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -761,9 +772,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -790,11 +801,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index a154c18..d36be7b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 72af7ef..8f8223c 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,6 +6,7 @@ package zstd
import (
"crypto/rand"
+ "errors"
"fmt"
"io"
"math"
@@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) {
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+ if s.eofWritten {
+ return 0, ErrEncoderClosed
+ }
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
@@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
- s.current = e.EncodeAll(s.filling, s.current[:0])
+ s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+ if final {
+ s.eofWritten = true
+ }
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error {
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
- if final {
- s.eofWritten = true
- }
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -401,12 +405,20 @@ func (e *Encoder) Flush() error {
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return s.err
}
return s.writeErr
@@ -422,6 +434,9 @@ func (e *Encoder) Close() error {
}
err := e.nextBlock(true)
if err != nil {
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
if s.frameContentSize > 0 {
@@ -459,6 +474,11 @@ func (e *Encoder) Close() error {
}
_, s.err = s.w.Write(frame)
}
+ if s.err == nil {
+ s.err = ErrEncoderClosed
+ return nil
+ }
+
return s.err
}
@@ -469,6 +489,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ e.encoders <- enc
+ }()
+ return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
- e.init.Do(e.initialize)
- enc := <-e.encoders
- defer func() {
- // Release encoder reference to last block.
- // If a non-single block is needed the encoder will reset again.
- e.encoders <- enc
- }()
+
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 53e160f..e47af66 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
- printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 17901e0..ae7d4d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -162,12 +162,12 @@ finalize:
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
+// func writeBlocks(s *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
index 9a7655c..0782b86 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 8adabd8..c59f17e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
- return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}
- return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 5b06174..f5591fa 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 4be7cc7..066bef2 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -88,6 +88,10 @@ var (
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+ // ErrEncoderClosed will be returned if the Encoder was used after
+ // Close has been called.
+ ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
index e964654..04aaca8 100644
--- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -246,7 +246,7 @@ func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.Compressi
b.src = src // keep track of the source for content checksum
if f.Descriptor.Flags.BlockChecksum() {
- b.Checksum = xxh32.ChecksumZero(src)
+ b.Checksum = xxh32.ChecksumZero(b.Data)
}
return b
}
@@ -328,7 +328,7 @@ func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byt
dst = dst[:n]
}
if f.Descriptor.Flags.BlockChecksum() {
- if c := xxh32.ChecksumZero(dst); c != b.Checksum {
+ if c := xxh32.ChecksumZero(b.data); c != b.Checksum {
err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
return nil, err
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 4d4b4aa..7e19eba 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -7,10 +7,13 @@ import (
"time"
)
-type CompareType int
+// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
+type CompareType = compareResult
+
+type compareResult int
const (
- compareLess CompareType = iota - 1
+ compareLess compareResult = iota - 1
compareEqual
compareGreater
)
@@ -39,7 +42,7 @@ var (
bytesType = reflect.TypeOf([]byte{})
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
@@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
- return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ if timeObj1.Before(timeObj2) {
+ return compareLess, true
+ }
+ if timeObj1.Equal(timeObj2) {
+ return compareEqual, true
+ }
+ return compareGreater, true
}
case reflect.Slice:
{
@@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}
- return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
@@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
}
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
return true
}
-func containsValue(values []CompareType, value CompareType) bool {
+func containsValue(values []compareResult, value compareResult) bool {
for _, v := range values {
if v == value {
return true
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 3ddab10..1906341 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick
// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index a84e09b..2162908 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
-// EqualValuesf asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
@@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti
// a.EventuallyWithT(func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor
// a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin
return NotContainsf(a.t, s, contains, msg, args...)
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAsf(a.t, err, target, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 00df62a..1d2f718 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -6,7 +6,7 @@ import (
)
// isOrdered checks that collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT
// assert.IsIncreasing(t, []float{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonIncreasing(t, []float{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// assert.IsDecreasing(t, []float{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo
// assert.IsNonDecreasing(t, []float{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 0b7570f..4e91332 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -19,7 +19,9 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- "gopkg.in/yaml.v3"
+
+ // Wrapper around gopkg.in/yaml.v3
+ "github.com/stretchr/testify/assert/yaml"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful
+// for table driven tests.
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
+
// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
@@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
h.Helper()
}
- if !samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ return Fail(t, "Both arguments must be pointers", msgAndArgs...)
+ }
+
+ if !same {
+ // both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
@@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
h.Helper()
}
- if samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ //fails when the arguments are not pointers
+ return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
+ }
+
+ if same {
return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v",
expected, expected), msgAndArgs...)
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
return true
}
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
+// samePointers checks if two generic interface objects are pointers of the same
+// type pointing to the same object. It returns two values: same indicating if
+// they are the same type and point to the same object, and ok indicating that
+// both inputs are pointers.
+func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false
+ return false, false //not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
if firstType != secondType {
- return false
+ return false, true // both are pointers, but of different types
}
// compare pointer addresses
- return first == second
+ return first == second, true
}
// formatUnequalValues takes two values of arbitrary types and returns string
@@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string {
return value
}
-// EqualValues asserts that two objects are equal or convertible to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
}
- if aType.Kind() == reflect.Ptr {
- aType = aType.Elem()
- }
- if bType.Kind() == reflect.Ptr {
- bType = bType.Elem()
- }
-
- if aType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
- }
-
- if bType.Kind() != reflect.Struct {
- return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
- }
-
expected = copyExportedFields(expected)
actual = copyExportedFields(actual)
@@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri
return msg.String()
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+ }
+
+ if !isList(t, listA, msgAndArgs...) {
+ return Fail(t, "listA is not a list type", msgAndArgs...)
+ }
+ if !isList(t, listB, msgAndArgs...) {
+ return Fail(t, "listB is not a list type", msgAndArgs...)
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs)
+ }
+
+ return true
+}
+
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
}
+ if math.IsNaN(actualEpsilon) {
+ return Fail(t, "relative error is NaN", msgAndArgs...)
+ }
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
" < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in
// matchRegexp return true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
-
var r *regexp.Regexp
if rr, ok := rx.(*regexp.Regexp); ok {
r = rr
@@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool {
r = regexp.MustCompile(fmt.Sprint(rx))
}
- return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+ switch v := str.(type) {
+ case []byte:
+ return r.Match(v)
+ case string:
+ return r.MatchString(v)
+ default:
+ return r.MatchString(fmt.Sprint(v))
+ }
}
@@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{
MaxDepth: 10,
}
-type tHelper interface {
+type tHelper = interface {
Helper()
}
@@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
// CollectT implements the TestingT interface and collects all errors.
type CollectT struct {
+ // A slice of errors. Non-nil slice denotes a failure.
+ // If it's non-nil but len(c.errors) == 0, this is also a failure
+ // obtained by direct c.FailNow() call.
errors []error
}
@@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
}
-// FailNow panics.
-func (*CollectT) FailNow() {
- panic("Assertion failed")
+// FailNow stops execution by calling runtime.Goexit.
+func (c *CollectT) FailNow() {
+ c.fail()
+ runtime.Goexit()
}
// Deprecated: That was a method for internal usage that should not have been published. Now just panics.
@@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) {
panic("Copy() is deprecated")
}
+func (c *CollectT) fail() {
+ if !c.failed() {
+ c.errors = []error{} // Make it non-nil to mark a failure.
+ }
+}
+
+func (c *CollectT) failed() bool {
+ return c.errors != nil
+}
+
// EventuallyWithT asserts that given condition will be met in waitFor time,
// periodically checking target function each tick. In contrast to Eventually,
// it supplies a CollectT to the condition function, so that the condition
@@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) {
// assert.EventuallyWithT(t, func(c *assert.CollectT) {
// // add assertions as needed; any assertion failure will fail the current tick
// assert.True(c, externalValue, "expected 'externalValue' to be true")
-// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
var lastFinishedTickErrs []error
- ch := make(chan []error, 1)
+ ch := make(chan *CollectT, 1)
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
go func() {
collect := new(CollectT)
defer func() {
- ch <- collect.errors
+ ch <- collect
}()
condition(collect)
}()
- case errs := <-ch:
- if len(errs) == 0 {
+ case collect := <-ch:
+ if !collect.failed() {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
- lastFinishedTickErrs = errs
+ lastFinishedTickErrs = collect.errors
tick = ticker.C
}
}
@@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
), msgAndArgs...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
), msgAndArgs...)
}
+// NotErrorAs asserts that none of the errors in err's chain matches target,
+// but if so, sets target to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
func buildErrorChainString(err error) string {
if err == nil {
return ""
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
new file mode 100644
index 0000000..baa0cc7
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -0,0 +1,25 @@
+//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
+// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
+//
+// This implementation is selected with the testify_yaml_custom build tag.
+//
+// go test -tags testify_yaml_custom
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3].
+//
+// In your test package:
+//
+// import assertYaml "github.com/stretchr/testify/assert/yaml"
+//
+// func init() {
+// assertYaml.Unmarshal = func (in []byte, out interface{}) error {
+// // ...
+// return nil
+// }
+// }
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 0000000..b83c6cf
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,37 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+// +build !testify_yaml_fail,!testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to override the
+// indirection with an alternative implementation of this package that uses
+// another implementation of YAML deserialization. This allows to not either not
+// use YAML deserialization at all, or to use another implementation than
+// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+// - testify_yaml_fail: [Unmarshal] always fails with an error
+// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+// [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+// go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+ return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 0000000..e78f7df
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,18 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+// go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+ return errNotImplemented
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index bff9c7f..6cbefce 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct {
func computeDistinctFixed(kvs []KeyValue) interface{} {
switch len(kvs) {
case 1:
- ptr := new([1]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [1]KeyValue(kvs)
case 2:
- ptr := new([2]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [2]KeyValue(kvs)
case 3:
- ptr := new([3]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [3]KeyValue(kvs)
case 4:
- ptr := new([4]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [4]KeyValue(kvs)
case 5:
- ptr := new([5]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [5]KeyValue(kvs)
case 6:
- ptr := new([6]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [6]KeyValue(kvs)
case 7:
- ptr := new([7]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [7]KeyValue(kvs)
case 8:
- ptr := new([8]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [8]KeyValue(kvs)
case 9:
- ptr := new([9]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [9]KeyValue(kvs)
case 10:
- ptr := new([10]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
+ return [10]KeyValue(kvs)
default:
return nil
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index b320314..9ea0ecb 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -231,15 +231,27 @@ func (v Value) Emit() string {
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(v.asInt64Slice())
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(v.asFloat64Slice())
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(v.asStringSlice())
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
case STRING:
return v.stringly
default:
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index df29d96..49a35b1 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes"
import (
"encoding/json"
+ "errors"
"fmt"
"strconv"
)
@@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return nil
}
if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+ return errors.New("nil receiver passed to UnmarshalJSON")
}
var x interface{}
@@ -83,7 +84,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return fmt.Errorf("invalid code: %q", ci)
}
- *c = Code(ci)
+ *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
return nil
}
return fmt.Errorf("invalid code: %q", string(b))
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
index f32766e..691d96c 100644
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
@@ -14,33 +14,33 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} {
var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} {
var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} {
var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} {
var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
- return cp.Elem().Interface()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
@@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero bool
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+ cpy := make([]bool, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
@@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero int64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+ cpy := make([]int64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero float64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+ cpy := make([]float64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
// AsStringSlice converts a string array into a slice into with same elements as array.
@@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string {
if rv.Type().Kind() != reflect.Array {
return nil
}
- var zero string
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]string)
+ cpy := make([]string, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
index 3e7bb3b..b2fe3e4 100644
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -20,11 +20,13 @@ func RawToBool(r uint64) bool {
}
func Int64ToRaw(i int64) uint64 {
- return uint64(i)
+ // Assumes original was a valid int64 (overflow not checked).
+ return uint64(i) // nolint: gosec
}
func RawToInt64(r uint64) int64 {
- return int64(r)
+ // Assumes original was a valid int64 (overflow not checked).
+ return int64(r) // nolint: gosec
}
func Float64ToRaw(f float64) uint64 {
@@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 {
}
func RawPtrToFloat64Ptr(r *uint64) *float64 {
- return (*float64)(unsafe.Pointer(r))
+ // Assumes original was a valid *float64 (overflow not checked).
+ return (*float64)(unsafe.Pointer(r)) // nolint: gosec
}
func RawPtrToInt64Ptr(r *uint64) *int64 {
- return (*int64)(unsafe.Pointer(r))
+ // Assumes original was a valid *int64 (overflow not checked).
+ return (*int64)(unsafe.Pointer(r)) // nolint: gosec
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 273d58e..9c0b720 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{}
// WithAttributes adds the attributes related to a span life-cycle event.
// These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
+// option is provided to a Span's start event. Otherwise, these
// attributes provide additional information about the event being recorded
// (e.g. error, state change, processing progress, system event).
//
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
index 5650a17..8c45a71 100644
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont
return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
}
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
// as a remote SpanContext and as the current Span. The Span implementation
// that wraps rsc is non-recording and performs no operations other than to
// return rsc as the SpanContext from the SpanContext method.
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index d661c5d..cdbf41d 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -96,7 +96,7 @@ can embed the API interface directly.
This option is not recommended. It will lead to publishing packages that
contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
dependency.
Finally, an author can embed another implementation in theirs. The embedded
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 0000000..ef85cb7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides, it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 0000000..d3aa476
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an in incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index 28877d4..d49adf6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace"
import (
"bytes"
- "context"
"encoding/hex"
"encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
)
const (
@@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
Remote: sc.remote,
})
}
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // AddLink adds a link.
- // Adding links at span creation using WithLinks is preferred to calling AddLink
- // later, for contexts that are available during span creation, because head
- // sampling decisions can only consider information present during span creation.
- AddLink(link Link)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 0000000..77952d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index 20b5cf2..dc5e34c 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string {
return ""
}
+// Walk walks all key value pairs in the TraceState by calling f
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
// Insert adds a new list-member defined by the key/value pair to the
// TraceState. If a list-member already exists for the given key, that
// list-member's value is updated. The new or updated list-member is always
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ce8c4bb..57c8143 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,5 +1,5 @@
-# github.com/ClickHouse/ch-go v0.61.5
-## explicit; go 1.21
+# github.com/ClickHouse/ch-go v0.65.0
+## explicit; go 1.22.0
github.com/ClickHouse/ch-go/compress
github.com/ClickHouse/ch-go/proto
# github.com/ClickHouse/clickhouse-go/v2 v2.26.0
@@ -42,7 +42,7 @@ github.com/go-faster/city
# github.com/go-faster/errors v0.7.1
## explicit; go 1.20
github.com/go-faster/errors
-# github.com/go-logr/logr v1.4.1
+# github.com/go-logr/logr v1.4.2
## explicit; go 1.18
github.com/go-logr/logr
# github.com/go-openapi/jsonpointer v0.19.6
@@ -96,8 +96,8 @@ github.com/josharian/native
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.7
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.11
+## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -135,7 +135,7 @@ github.com/munnerz/goautoneg
# github.com/paulmach/orb v0.11.1
## explicit; go 1.15
github.com/paulmach/orb
-# github.com/pierrec/lz4/v4 v4.1.21
+# github.com/pierrec/lz4/v4 v4.1.22
## explicit; go 1.14
github.com/pierrec/lz4/v4
github.com/pierrec/lz4/v4/internal/lz4block
@@ -167,6 +167,8 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
+# github.com/rogpeppe/go-internal v1.13.1
+## explicit; go 1.22
# github.com/rs/zerolog v1.32.0
## explicit; go 1.15
github.com/rs/zerolog
@@ -193,23 +195,24 @@ github.com/segmentio/asm/cpu/x86
# github.com/shopspring/decimal v1.4.0
## explicit; go 1.10
github.com/shopspring/decimal
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
## explicit; go 1.17
github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
# github.com/ti-mo/conntrack v0.5.0
## explicit; go 1.20
github.com/ti-mo/conntrack
# github.com/ti-mo/netfilter v0.5.0
## explicit; go 1.18
github.com/ti-mo/netfilter
-# go.opentelemetry.io/otel v1.26.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel v1.34.0
+## explicit; go 1.22.0
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal
go.opentelemetry.io/otel/internal/attribute
-# go.opentelemetry.io/otel/trace v1.26.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/trace v1.34.0
+## explicit; go 1.22.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
# go.uber.org/mock v0.4.0