diff --git a/.github/buildomat/jobs/lint.sh b/.github/buildomat/jobs/lint.sh
index b2387e95d6..1f7dd19166 100755
--- a/.github/buildomat/jobs/lint.sh
+++ b/.github/buildomat/jobs/lint.sh
@@ -9,8 +9,10 @@ set -o pipefail
set -o xtrace
source .github/buildomat/linux-setup.sh
-gmake -j"$(nproc)" lint
-# verify go.mod is up to date
-go mod tidy
-git diff --exit-code
+failed=0
+gmake -j"$(nproc)" lint || ((++failed))
+gmake -j"$(nproc)" generate || ((++failed))
+go mod tidy || ((++failed))
+git diff --exit-code || ((++failed))
+((!failed)) || exit
diff --git a/build/bazelutil/generate_redact_safe.sh b/build/bazelutil/generate_redact_safe.sh
index 3235a87cb1..e94ace2e3a 100755
--- a/build/bazelutil/generate_redact_safe.sh
+++ b/build/bazelutil/generate_redact_safe.sh
@@ -5,11 +5,11 @@ set -euo pipefail
echo "The following types are considered always safe for reporting:"
echo
echo "File | Type"; echo "--|--"
-git grep --recurse-submodules -n '^func \(.*\) SafeValue\(\)' | \
+grep -r -n '^func \(.*\) SafeValue\(\)' | \
grep -v '^vendor/github.com/cockroachdb/redact' | \
sed -E -e 's/^([^:]*):[0-9]+:func \(([^ ]* )?(.*)\) SafeValue.*$$/\1 | \`\3\`/g' | \
LC_ALL=C sort
-git grep --recurse-submodules -n 'redact\.RegisterSafeType' | \
+grep -r -n 'redact\.RegisterSafeType' | \
grep -vE '^([^:]*):[0-9]+:[ ]*//' | \
grep -v '^vendor/github.com/cockroachdb/redact' | \
sed -E -e 's/^([^:]*):[0-9]+:.*redact\.RegisterSafeType\((.*)\).*/\1 | \`\2\`/g' | \
diff --git a/docs/generated/redact_safe.md b/docs/generated/redact_safe.md
index 1f0980f720..a5d812c15d 100644
--- a/docs/generated/redact_safe.md
+++ b/docs/generated/redact_safe.md
@@ -47,7 +47,6 @@ pkg/storage/enginepb/mvcc.go | `TxnEpoch`
pkg/storage/enginepb/mvcc.go | `TxnSeq`
pkg/storage/enginepb/mvcc3.go | `*MVCCStats`
pkg/storage/enginepb/mvcc3.go | `MVCCStatsDelta`
-pkg/streaming/api.go | `StreamID`
pkg/util/hlc/timestamp.go | `ClockTimestamp`
pkg/util/hlc/timestamp.go | `LegacyTimestamp`
pkg/util/hlc/timestamp.go | `Timestamp`
diff --git a/docs/generated/settings/settings-for-tenants.txt b/docs/generated/settings/settings-for-tenants.txt
index 4ca0326bec..f9613b7fed 100644
--- a/docs/generated/settings/settings-for-tenants.txt
+++ b/docs/generated/settings/settings-for-tenants.txt
@@ -11,8 +11,6 @@ cloudstorage.timeout duration 10m0s the timeout for import/export storage operat
cluster.organization string organization name
cluster.preserve_downgrade_option string disable (automatic or manual) cluster version upgrade from the specified version until reset
diagnostics.forced_sql_stat_reset.interval duration 2h0m0s interval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.
-diagnostics.reporting.enabled boolean true enable reporting diagnostic metrics to cockroach labs
-diagnostics.reporting.interval duration 1h0m0s interval at which diagnostics data should be reported
external.graphite.endpoint string if nonempty, push server metrics to the Graphite or Carbon server at the specified host:port
external.graphite.interval duration 10s the interval at which metrics are pushed to Graphite (if enabled)
feature.export.enabled boolean true set to true to enable exports, false to disable; default is true
diff --git a/docs/generated/settings/settings.html b/docs/generated/settings/settings.html
index eb75c8c1de..e5c8f67ab6 100644
--- a/docs/generated/settings/settings.html
+++ b/docs/generated/settings/settings.html
@@ -17,8 +17,6 @@
cluster.preserve_downgrade_option | string | | disable (automatic or manual) cluster version upgrade from the specified version until reset |
diagnostics.active_query_dumps.enabled | boolean | true | experimental: enable dumping of anonymized active queries to disk when node is under memory pressure |
diagnostics.forced_sql_stat_reset.interval | duration | 2h0m0s | interval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H. |
-diagnostics.reporting.enabled | boolean | true | enable reporting diagnostic metrics to cockroach labs |
-diagnostics.reporting.interval | duration | 1h0m0s | interval at which diagnostics data should be reported |
external.graphite.endpoint | string | | if nonempty, push server metrics to the Graphite or Carbon server at the specified host:port |
external.graphite.interval | duration | 10s | the interval at which metrics are pushed to Graphite (if enabled) |
feature.export.enabled | boolean | true | set to true to enable exports, false to disable; default is true |
diff --git a/docs/generated/swagger/spec.json b/docs/generated/swagger/spec.json
index ba335b7592..8e04edda6a 100644
--- a/docs/generated/swagger/spec.json
+++ b/docs/generated/swagger/spec.json
@@ -1007,7 +1007,7 @@
"x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb"
},
"GCPolicy": {
- "description": "TODO(spencer): flesh this out to include maximum number of values\nas well as whether there's an intersection between max values\nand TTL or a union.",
+ "description": "TODO(spencer): flesh this out to include maximum number of values\n\nas well as whether there's an intersection between max values\nand TTL or a union.",
"type": "object",
"title": "GCPolicy defines garbage collection policies which apply to MVCC\nvalues within a zone.",
"properties": {
@@ -1803,7 +1803,7 @@
"$ref": "#/definitions/GCPolicy"
},
"global_reads": {
- "description": "GlobalReads specifies whether transactions operating over the range(s)\nshould be configured to provide non-blocking behavior, meaning that reads\ncan be served consistently from all replicas and do not block on writes. In\nexchange, writes get pushed into the future and must wait on commit to\nensure linearizability. For more, see:\nhttps://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200811_non_blocking_txns.md",
+ "description": "GlobalReads specifies whether transactions operating over the range(s)\nshould be configured to provide non-blocking behavior, meaning that reads\ncan be served consistently from all replicas and do not block on writes. In\nexchange, writes get pushed into the future and must wait on commit to\nensure linearizability. For more, see:\n\nhttps://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200811_non_blocking_txns.md",
"type": "boolean",
"x-go-name": "GlobalReads"
},
diff --git a/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go b/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go
index f139eee400..df8e408090 100644
--- a/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go
+++ b/pkg/kv/kvserver/concurrency/lockstate_interval_btree.go
@@ -32,17 +32,20 @@ const (
// cmp returns a value indicating the sort order relationship between
// a and b. The comparison is performed lexicographically on
-// (a.Key(), a.EndKey(), a.ID())
+//
+// (a.Key(), a.EndKey(), a.ID())
+//
// and
-// (b.Key(), b.EndKey(), b.ID())
+//
+// (b.Key(), b.EndKey(), b.ID())
+//
// tuples.
//
// Given c = cmp(a, b):
//
-// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
-// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
-// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
-//
+// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
+// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
+// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
func cmp(a, b *lockState) int {
c := bytes.Compare(a.Key(), b.Key())
if c != 0 {
@@ -325,21 +328,21 @@ func (n *node) find(item *lockState) (index int, found bool) {
//
// Before:
//
-// +-----------+
-// | x y z |
-// +--/-/-\-\--+
+// +-----------+
+// | x y z |
+// +--/-/-\-\--+
//
// After:
//
-// +-----------+
-// | y |
-// +----/-\----+
-// / \
-// v v
+// +-----------+
+// | y |
+// +----/-\----+
+// / \
+// v v
+//
// +-----------+ +-----------+
// | x | | z |
// +-----------+ +-----------+
-//
func (n *node) split(i int) (*lockState, *node) {
out := n.items[i]
var next *node
@@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *lockState {
// is to minimize the number of key comparisons performed in total. The
// algorithm operates based on the following two invariants maintained by
// augmented interval btree:
-// 1. all items are sorted in the btree based on their start key.
-// 2. all btree nodes maintain the upper bound end key of all items
-// in their subtree.
+// 1. all items are sorted in the btree based on their start key.
+// 2. all btree nodes maintain the upper bound end key of all items
+// in their subtree.
//
// The scan algorithm starts in "unconstrained minimum" and "unconstrained
// maximum" states. To enter a "constrained minimum" state, the scan must reach
@@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *lockState {
//
// The scan algorithm works like a standard btree forward scan with the
// following augmentations:
-// 1. before tranversing the tree, the scan performs a binary search on the
-// root node's items to determine a "soft" lower-bound constraint position
-// and a "hard" upper-bound constraint position in the root's children.
-// 2. when tranversing into a child node in the lower or upper bound constraint
-// position, the constraint is refined by searching the child's items.
-// 3. the initial traversal down the tree follows the left-most children
-// whose upper bound end keys are equal to or greater than the start key
-// of the search range. The children followed will be equal to or less
-// than the soft lower bound constraint.
-// 4. once the initial tranversal completes and the scan is in the left-most
-// btree node whose upper bound overlaps the search range, key comparisons
-// must be performed with each item in the tree. This is necessary because
-// any of these items may have end keys that cause them to overlap with the
-// search range.
-// 5. once the scan reaches the lower bound constraint position (the first item
-// with a start key equal to or greater than the search range's start key),
-// it can begin scaning without performing key comparisons. This is allowed
-// because all items from this point forward will have end keys that are
-// greater than the search range's start key.
-// 6. once the scan reaches the upper bound constraint position, it terminates.
-// It does so because the item at this position is the first item with a
-// start key larger than the search range's end key.
+// 1. before traversing the tree, the scan performs a binary search on the
+// root node's items to determine a "soft" lower-bound constraint position
+// and a "hard" upper-bound constraint position in the root's children.
+// 2. when traversing into a child node in the lower or upper bound constraint
+// position, the constraint is refined by searching the child's items.
+// 3. the initial traversal down the tree follows the left-most children
+// whose upper bound end keys are equal to or greater than the start key
+// of the search range. The children followed will be equal to or less
+// than the soft lower bound constraint.
+// 4. once the initial traversal completes and the scan is in the left-most
+// btree node whose upper bound overlaps the search range, key comparisons
+// must be performed with each item in the tree. This is necessary because
+// any of these items may have end keys that cause them to overlap with the
+// search range.
+// 5. once the scan reaches the lower bound constraint position (the first item
+// with a start key equal to or greater than the search range's start key),
+// it can begin scanning without performing key comparisons. This is allowed
+// because all items from this point forward will have end keys that are
+// greater than the search range's start key.
+// 6. once the scan reaches the upper bound constraint position, it terminates.
+// It does so because the item at this position is the first item with a
+// start key larger than the search range's end key.
type overlapScan struct {
// The "soft" lower-bound constraint.
constrMinN *node
diff --git a/pkg/kv/kvserver/spanlatch/latch_interval_btree.go b/pkg/kv/kvserver/spanlatch/latch_interval_btree.go
index c8f081ab7c..2cfe22ad69 100644
--- a/pkg/kv/kvserver/spanlatch/latch_interval_btree.go
+++ b/pkg/kv/kvserver/spanlatch/latch_interval_btree.go
@@ -32,17 +32,20 @@ const (
// cmp returns a value indicating the sort order relationship between
// a and b. The comparison is performed lexicographically on
-// (a.Key(), a.EndKey(), a.ID())
+//
+// (a.Key(), a.EndKey(), a.ID())
+//
// and
-// (b.Key(), b.EndKey(), b.ID())
+//
+// (b.Key(), b.EndKey(), b.ID())
+//
// tuples.
//
// Given c = cmp(a, b):
//
-// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
-// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
-// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
-//
+// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
+// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
+// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
func cmp(a, b *latch) int {
c := bytes.Compare(a.Key(), b.Key())
if c != 0 {
@@ -325,21 +328,21 @@ func (n *node) find(item *latch) (index int, found bool) {
//
// Before:
//
-// +-----------+
-// | x y z |
-// +--/-/-\-\--+
+// +-----------+
+// | x y z |
+// +--/-/-\-\--+
//
// After:
//
-// +-----------+
-// | y |
-// +----/-\----+
-// / \
-// v v
+// +-----------+
+// | y |
+// +----/-\----+
+// / \
+// v v
+//
// +-----------+ +-----------+
// | x | | z |
// +-----------+ +-----------+
-//
func (n *node) split(i int) (*latch, *node) {
out := n.items[i]
var next *node
@@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *latch {
// is to minimize the number of key comparisons performed in total. The
// algorithm operates based on the following two invariants maintained by
// augmented interval btree:
-// 1. all items are sorted in the btree based on their start key.
-// 2. all btree nodes maintain the upper bound end key of all items
-// in their subtree.
+// 1. all items are sorted in the btree based on their start key.
+// 2. all btree nodes maintain the upper bound end key of all items
+// in their subtree.
//
// The scan algorithm starts in "unconstrained minimum" and "unconstrained
// maximum" states. To enter a "constrained minimum" state, the scan must reach
@@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *latch {
//
// The scan algorithm works like a standard btree forward scan with the
// following augmentations:
-// 1. before tranversing the tree, the scan performs a binary search on the
-// root node's items to determine a "soft" lower-bound constraint position
-// and a "hard" upper-bound constraint position in the root's children.
-// 2. when tranversing into a child node in the lower or upper bound constraint
-// position, the constraint is refined by searching the child's items.
-// 3. the initial traversal down the tree follows the left-most children
-// whose upper bound end keys are equal to or greater than the start key
-// of the search range. The children followed will be equal to or less
-// than the soft lower bound constraint.
-// 4. once the initial tranversal completes and the scan is in the left-most
-// btree node whose upper bound overlaps the search range, key comparisons
-// must be performed with each item in the tree. This is necessary because
-// any of these items may have end keys that cause them to overlap with the
-// search range.
-// 5. once the scan reaches the lower bound constraint position (the first item
-// with a start key equal to or greater than the search range's start key),
-// it can begin scaning without performing key comparisons. This is allowed
-// because all items from this point forward will have end keys that are
-// greater than the search range's start key.
-// 6. once the scan reaches the upper bound constraint position, it terminates.
-// It does so because the item at this position is the first item with a
-// start key larger than the search range's end key.
+// 1. before traversing the tree, the scan performs a binary search on the
+// root node's items to determine a "soft" lower-bound constraint position
+// and a "hard" upper-bound constraint position in the root's children.
+// 2. when traversing into a child node in the lower or upper bound constraint
+// position, the constraint is refined by searching the child's items.
+// 3. the initial traversal down the tree follows the left-most children
+// whose upper bound end keys are equal to or greater than the start key
+// of the search range. The children followed will be equal to or less
+// than the soft lower bound constraint.
+// 4. once the initial traversal completes and the scan is in the left-most
+// btree node whose upper bound overlaps the search range, key comparisons
+// must be performed with each item in the tree. This is necessary because
+// any of these items may have end keys that cause them to overlap with the
+// search range.
+// 5. once the scan reaches the lower bound constraint position (the first item
+// with a start key equal to or greater than the search range's start key),
+// it can begin scanning without performing key comparisons. This is allowed
+// because all items from this point forward will have end keys that are
+// greater than the search range's start key.
+// 6. once the scan reaches the upper bound constraint position, it terminates.
+// It does so because the item at this position is the first item with a
+// start key larger than the search range's end key.
type overlapScan struct {
// The "soft" lower-bound constraint.
constrMinN *node
diff --git a/pkg/spanconfig/spanconfigstore/entry_interval_btree.go b/pkg/spanconfig/spanconfigstore/entry_interval_btree.go
index 1118243937..79df86f0fe 100644
--- a/pkg/spanconfig/spanconfigstore/entry_interval_btree.go
+++ b/pkg/spanconfig/spanconfigstore/entry_interval_btree.go
@@ -32,17 +32,20 @@ const (
// cmp returns a value indicating the sort order relationship between
// a and b. The comparison is performed lexicographically on
-// (a.Key(), a.EndKey(), a.ID())
+//
+// (a.Key(), a.EndKey(), a.ID())
+//
// and
-// (b.Key(), b.EndKey(), b.ID())
+//
+// (b.Key(), b.EndKey(), b.ID())
+//
// tuples.
//
// Given c = cmp(a, b):
//
-// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
-// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
-// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
-//
+// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
+// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
+// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
func cmp(a, b *entry) int {
c := bytes.Compare(a.Key(), b.Key())
if c != 0 {
@@ -325,21 +328,21 @@ func (n *node) find(item *entry) (index int, found bool) {
//
// Before:
//
-// +-----------+
-// | x y z |
-// +--/-/-\-\--+
+// +-----------+
+// | x y z |
+// +--/-/-\-\--+
//
// After:
//
-// +-----------+
-// | y |
-// +----/-\----+
-// / \
-// v v
+// +-----------+
+// | y |
+// +----/-\----+
+// / \
+// v v
+//
// +-----------+ +-----------+
// | x | | z |
// +-----------+ +-----------+
-//
func (n *node) split(i int) (*entry, *node) {
out := n.items[i]
var next *node
@@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *entry {
// is to minimize the number of key comparisons performed in total. The
// algorithm operates based on the following two invariants maintained by
// augmented interval btree:
-// 1. all items are sorted in the btree based on their start key.
-// 2. all btree nodes maintain the upper bound end key of all items
-// in their subtree.
+// 1. all items are sorted in the btree based on their start key.
+// 2. all btree nodes maintain the upper bound end key of all items
+// in their subtree.
//
// The scan algorithm starts in "unconstrained minimum" and "unconstrained
// maximum" states. To enter a "constrained minimum" state, the scan must reach
@@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *entry {
//
// The scan algorithm works like a standard btree forward scan with the
// following augmentations:
-// 1. before tranversing the tree, the scan performs a binary search on the
-// root node's items to determine a "soft" lower-bound constraint position
-// and a "hard" upper-bound constraint position in the root's children.
-// 2. when tranversing into a child node in the lower or upper bound constraint
-// position, the constraint is refined by searching the child's items.
-// 3. the initial traversal down the tree follows the left-most children
-// whose upper bound end keys are equal to or greater than the start key
-// of the search range. The children followed will be equal to or less
-// than the soft lower bound constraint.
-// 4. once the initial tranversal completes and the scan is in the left-most
-// btree node whose upper bound overlaps the search range, key comparisons
-// must be performed with each item in the tree. This is necessary because
-// any of these items may have end keys that cause them to overlap with the
-// search range.
-// 5. once the scan reaches the lower bound constraint position (the first item
-// with a start key equal to or greater than the search range's start key),
-// it can begin scaning without performing key comparisons. This is allowed
-// because all items from this point forward will have end keys that are
-// greater than the search range's start key.
-// 6. once the scan reaches the upper bound constraint position, it terminates.
-// It does so because the item at this position is the first item with a
-// start key larger than the search range's end key.
+// 1. before traversing the tree, the scan performs a binary search on the
+// root node's items to determine a "soft" lower-bound constraint position
+// and a "hard" upper-bound constraint position in the root's children.
+// 2. when traversing into a child node in the lower or upper bound constraint
+// position, the constraint is refined by searching the child's items.
+// 3. the initial traversal down the tree follows the left-most children
+// whose upper bound end keys are equal to or greater than the start key
+// of the search range. The children followed will be equal to or less
+// than the soft lower bound constraint.
+// 4. once the initial traversal completes and the scan is in the left-most
+// btree node whose upper bound overlaps the search range, key comparisons
+// must be performed with each item in the tree. This is necessary because
+// any of these items may have end keys that cause them to overlap with the
+// search range.
+// 5. once the scan reaches the lower bound constraint position (the first item
+// with a start key equal to or greater than the search range's start key),
+// it can begin scanning without performing key comparisons. This is allowed
+// because all items from this point forward will have end keys that are
+// greater than the search range's start key.
+// 6. once the scan reaches the upper bound constraint position, it terminates.
+// It does so because the item at this position is the first item with a
+// start key larger than the search range's end key.
type overlapScan struct {
// The "soft" lower-bound constraint.
constrMinN *node
diff --git a/pkg/util/interval/generic/example_interval_btree.go b/pkg/util/interval/generic/example_interval_btree.go
index 5f803ef5c9..b340d6a145 100644
--- a/pkg/util/interval/generic/example_interval_btree.go
+++ b/pkg/util/interval/generic/example_interval_btree.go
@@ -32,17 +32,20 @@ const (
// cmp returns a value indicating the sort order relationship between
// a and b. The comparison is performed lexicographically on
-// (a.Key(), a.EndKey(), a.ID())
+//
+// (a.Key(), a.EndKey(), a.ID())
+//
// and
-// (b.Key(), b.EndKey(), b.ID())
+//
+// (b.Key(), b.EndKey(), b.ID())
+//
// tuples.
//
// Given c = cmp(a, b):
//
-// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
-// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
-// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
-//
+// c == -1 if (a.Key(), a.EndKey(), a.ID()) < (b.Key(), b.EndKey(), b.ID())
+// c == 0 if (a.Key(), a.EndKey(), a.ID()) == (b.Key(), b.EndKey(), b.ID())
+// c == 1 if (a.Key(), a.EndKey(), a.ID()) > (b.Key(), b.EndKey(), b.ID())
func cmp(a, b *example) int {
c := bytes.Compare(a.Key(), b.Key())
if c != 0 {
@@ -325,21 +328,21 @@ func (n *node) find(item *example) (index int, found bool) {
//
// Before:
//
-// +-----------+
-// | x y z |
-// +--/-/-\-\--+
+// +-----------+
+// | x y z |
+// +--/-/-\-\--+
//
// After:
//
-// +-----------+
-// | y |
-// +----/-\----+
-// / \
-// v v
+// +-----------+
+// | y |
+// +----/-\----+
+// / \
+// v v
+//
// +-----------+ +-----------+
// | x | | z |
// +-----------+ +-----------+
-//
func (n *node) split(i int) (*example, *node) {
out := n.items[i]
var next *node
@@ -1004,9 +1007,9 @@ func (i *iterator) Cur() *example {
// is to minimize the number of key comparisons performed in total. The
// algorithm operates based on the following two invariants maintained by
// augmented interval btree:
-// 1. all items are sorted in the btree based on their start key.
-// 2. all btree nodes maintain the upper bound end key of all items
-// in their subtree.
+// 1. all items are sorted in the btree based on their start key.
+// 2. all btree nodes maintain the upper bound end key of all items
+// in their subtree.
//
// The scan algorithm starts in "unconstrained minimum" and "unconstrained
// maximum" states. To enter a "constrained minimum" state, the scan must reach
@@ -1021,28 +1024,28 @@ func (i *iterator) Cur() *example {
//
// The scan algorithm works like a standard btree forward scan with the
// following augmentations:
-// 1. before tranversing the tree, the scan performs a binary search on the
-// root node's items to determine a "soft" lower-bound constraint position
-// and a "hard" upper-bound constraint position in the root's children.
-// 2. when tranversing into a child node in the lower or upper bound constraint
-// position, the constraint is refined by searching the child's items.
-// 3. the initial traversal down the tree follows the left-most children
-// whose upper bound end keys are equal to or greater than the start key
-// of the search range. The children followed will be equal to or less
-// than the soft lower bound constraint.
-// 4. once the initial tranversal completes and the scan is in the left-most
-// btree node whose upper bound overlaps the search range, key comparisons
-// must be performed with each item in the tree. This is necessary because
-// any of these items may have end keys that cause them to overlap with the
-// search range.
-// 5. once the scan reaches the lower bound constraint position (the first item
-// with a start key equal to or greater than the search range's start key),
-// it can begin scaning without performing key comparisons. This is allowed
-// because all items from this point forward will have end keys that are
-// greater than the search range's start key.
-// 6. once the scan reaches the upper bound constraint position, it terminates.
-// It does so because the item at this position is the first item with a
-// start key larger than the search range's end key.
+// 1. before traversing the tree, the scan performs a binary search on the
+// root node's items to determine a "soft" lower-bound constraint position
+// and a "hard" upper-bound constraint position in the root's children.
+// 2. when traversing into a child node in the lower or upper bound constraint
+// position, the constraint is refined by searching the child's items.
+// 3. the initial traversal down the tree follows the left-most children
+// whose upper bound end keys are equal to or greater than the start key
+// of the search range. The children followed will be equal to or less
+// than the soft lower bound constraint.
+// 4. once the initial traversal completes and the scan is in the left-most
+// btree node whose upper bound overlaps the search range, key comparisons
+// must be performed with each item in the tree. This is necessary because
+// any of these items may have end keys that cause them to overlap with the
+// search range.
+// 5. once the scan reaches the lower bound constraint position (the first item
+// with a start key equal to or greater than the search range's start key),
+// it can begin scanning without performing key comparisons. This is allowed
+// because all items from this point forward will have end keys that are
+// greater than the search range's start key.
+// 6. once the scan reaches the upper bound constraint position, it terminates.
+// It does so because the item at this position is the first item with a
+// start key larger than the search range's end key.
type overlapScan struct {
// The "soft" lower-bound constraint.
constrMinN *node
diff --git a/pkg/util/timeutil/lowercase_timezones_generated.go b/pkg/util/timeutil/lowercase_timezones_generated.go
index a47e32063c..1e9a55c8e8 100644
--- a/pkg/util/timeutil/lowercase_timezones_generated.go
+++ b/pkg/util/timeutil/lowercase_timezones_generated.go
@@ -108,6 +108,7 @@ var lowercaseTimezones = map[string]string{
`america/cayman`: `America/Cayman`,
`america/chicago`: `America/Chicago`,
`america/chihuahua`: `America/Chihuahua`,
+ `america/ciudad_juarez`: `America/Ciudad_Juarez`,
`america/coral_harbour`: `America/Coral_Harbour`,
`america/cordoba`: `America/Cordoba`,
`america/costa_rica`: `America/Costa_Rica`,
@@ -462,6 +463,7 @@ var lowercaseTimezones = map[string]string{
`europe/kaliningrad`: `Europe/Kaliningrad`,
`europe/kiev`: `Europe/Kiev`,
`europe/kirov`: `Europe/Kirov`,
+ `europe/kyiv`: `Europe/Kyiv`,
`europe/lisbon`: `Europe/Lisbon`,
`europe/ljubljana`: `Europe/Ljubljana`,
`europe/london`: `Europe/London`,
@@ -557,6 +559,7 @@ var lowercaseTimezones = map[string]string{
`pacific/guam`: `Pacific/Guam`,
`pacific/honolulu`: `Pacific/Honolulu`,
`pacific/johnston`: `Pacific/Johnston`,
+ `pacific/kanton`: `Pacific/Kanton`,
`pacific/kiritimati`: `Pacific/Kiritimati`,
`pacific/kosrae`: `Pacific/Kosrae`,
`pacific/kwajalein`: `Pacific/Kwajalein`,