diff --git a/CHANGELOG.md b/CHANGELOG.md index 9880c3c6d2b..2df1f596a6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * [CHANGE] StoreGateway/Alertmanager: Add default 5s connection timeout on client. #6603 * [CHANGE] Ingester: Remove EnableNativeHistograms config flag and instead gate keep through new per-tenant limit at ingestion. #6718 * [CHANGE] Validate a tenantID when to use a single tenant resolver. #6727 +* [FEATURE] Alertmanager/Ruler: Introduce a user scanner to reduce the number of list calls to object storage. #6999 * [FEATURE] Distributor: Add an experimental `-distributor.otlp.enable-type-and-unit-labels` flag to add `__type__` and `__unit__` labels for OTLP metrics. #6969 * [FEATURE] Distributor: Add an experimental `-distributor.otlp.allow-delta-temporality` flag to ingest delta temporality otlp metrics. #6934 * [FEATURE] Query Frontend: Add dynamic interval size for query splitting. This is enabled by configuring experimental flags `querier.max-shards-per-query` and/or `querier.max-fetched-data-duration-per-query`. The split interval size is dynamically increased to maintain a number of shards and total duration fetched below the configured values. #6458 diff --git a/docs/blocks-storage/querier.md b/docs/blocks-storage/querier.md index 855ff5c9028..d7c446d727d 100644 --- a/docs/blocks-storage/querier.md +++ b/docs/blocks-storage/querier.md @@ -1927,6 +1927,11 @@ blocks_storage: # CLI flag: -blocks-storage.users-scanner.user-index.max-stale-period [max_stale_period: | default = 1h] + # How frequently user index file is updated. It only takes effect when user + # scan strategy is user_index. + # CLI flag: -blocks-storage.users-scanner.user-index.cleanup-interval + [clean_up_interval: | default = 15m] + # TTL of the cached users. 0 disables caching and relies on caching at # bucket client level. 
# CLI flag: -blocks-storage.users-scanner.cache-ttl diff --git a/docs/blocks-storage/store-gateway.md b/docs/blocks-storage/store-gateway.md index 506cf0f32a1..d4ba5493041 100644 --- a/docs/blocks-storage/store-gateway.md +++ b/docs/blocks-storage/store-gateway.md @@ -2013,6 +2013,11 @@ blocks_storage: # CLI flag: -blocks-storage.users-scanner.user-index.max-stale-period [max_stale_period: | default = 1h] + # How frequently user index file is updated. It only takes effect when user + # scan strategy is user_index. + # CLI flag: -blocks-storage.users-scanner.user-index.cleanup-interval + [clean_up_interval: | default = 15m] + # TTL of the cached users. 0 disables caching and relies on caching at # bucket client level. # CLI flag: -blocks-storage.users-scanner.cache-ttl diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index a642c4f3241..d649ce23a18 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -981,6 +981,26 @@ local: # Path at which alertmanager configurations are stored. # CLI flag: -alertmanager-storage.local.path [path: | default = ""] + +users_scanner: + # Strategy to use to scan users. Supported values are: list, user_index. + # CLI flag: -alertmanager-storage.users-scanner.strategy + [strategy: | default = "list"] + + # Maximum period of time to consider the user index as stale. Fall back to the + # base scanner if stale. Only valid when strategy is user_index. + # CLI flag: -alertmanager-storage.users-scanner.user-index.max-stale-period + [max_stale_period: | default = 1h] + + # How frequently user index file is updated. It only takes effect when user + # scan strategy is user_index. + # CLI flag: -alertmanager-storage.users-scanner.user-index.cleanup-interval + [clean_up_interval: | default = 15m] + + # TTL of the cached users. 0 disables caching and relies on caching at bucket + # client level. 
+ # CLI flag: -alertmanager-storage.users-scanner.cache-ttl + [cache_ttl: | default = 0s] ``` ### `blocks_storage_config` @@ -2593,6 +2613,11 @@ users_scanner: # CLI flag: -blocks-storage.users-scanner.user-index.max-stale-period [max_stale_period: | default = 1h] + # How frequently user index file is updated. It only takes effect when user + # scan strategy is user_index. + # CLI flag: -blocks-storage.users-scanner.user-index.cleanup-interval + [clean_up_interval: | default = 15m] + # TTL of the cached users. 0 disables caching and relies on caching at bucket # client level. # CLI flag: -blocks-storage.users-scanner.cache-ttl @@ -5803,6 +5828,26 @@ local: # Directory to scan for rules # CLI flag: -ruler-storage.local.directory [directory: | default = ""] + +users_scanner: + # Strategy to use to scan users. Supported values are: list, user_index. + # CLI flag: -ruler-storage.users-scanner.strategy + [strategy: | default = "list"] + + # Maximum period of time to consider the user index as stale. Fall back to the + # base scanner if stale. Only valid when strategy is user_index. + # CLI flag: -ruler-storage.users-scanner.user-index.max-stale-period + [max_stale_period: | default = 1h] + + # How frequently user index file is updated. It only takes effect when user + # scan strategy is user_index. + # CLI flag: -ruler-storage.users-scanner.user-index.cleanup-interval + [clean_up_interval: | default = 15m] + + # TTL of the cached users. 0 disables caching and relies on caching at bucket + # client level. 
+ # CLI flag: -ruler-storage.users-scanner.cache-ttl + [cache_ttl: | default = 0s] ``` ### `runtime_configuration_storage_config` diff --git a/integration/alertmanager_test.go b/integration/alertmanager_test.go index f38442b8eb1..8aa0a6d1007 100644 --- a/integration/alertmanager_test.go +++ b/integration/alertmanager_test.go @@ -69,6 +69,41 @@ func TestAlertmanager(t *testing.T) { assertServiceMetricsPrefixes(t, AlertManager, alertmanager) } +func TestAlertmanagerWithUserIndexUpdater(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs/user-1.yaml", []byte(cortexAlertmanagerUserConfigYaml))) + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, alertsBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + baseFlags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags()) + flags := mergeFlags(baseFlags, AlertmanagerShardingFlags(consul.NetworkHTTPEndpoint(), 1), map[string]string{ + "-alertmanager-storage.users-scanner.strategy": "user_index", + "-alertmanager-storage.users-scanner.user-index.cleanup-interval": "15s", + "-alertmanager.configs.poll-interval": "5s", + }) + + am := e2ecortex.NewAlertmanager( + "alertmanager", + flags, + "", + ) + + require.NoError(t, s.StartAndWaitReady(am)) + // To make sure user index file is updated/scanned. Note the label-matcher option + // must be passed to WaitSumMetricsWithOptions, not to require.NoError (where it + // would be silently consumed as a msgAndArgs argument and ignored). + require.NoError(t, am.WaitSumMetricsWithOptions(e2e.Greater(float64(0)), []string{"cortex_user_index_last_successful_update_timestamp_seconds"}, + e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "component", "alertmanager")), + )) + require.NoError(t, am.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(float64(1)), []string{"cortex_user_index_scan_succeeded_total"}, + e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "component", "alertmanager")), + )) +} + func TestAlertmanagerStoreAPI(t *testing.T) { s, err := 
e2e.NewScenario(networkName) require.NoError(t, err) diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 48bdaff5514..3f172c33901 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -143,6 +143,65 @@ func TestRulerAPI(t *testing.T) { } } +func TestRulerWithUserIndexUpdater(t *testing.T) { + s, err := e2e.NewScenario(networkName) + require.NoError(t, err) + defer s.Close() + + // Start dependencies. + consul := e2edb.NewConsul() + minio := e2edb.NewMinio(9000, rulestoreBucketName) + require.NoError(t, s.StartAndWaitReady(consul, minio)) + + // Configure the ruler. + rulerFlags := mergeFlags( + BlocksStorageFlags(), + RulerFlags(), + RulerShardingFlags(consul.NetworkHTTPEndpoint()), + map[string]string{ + "-ruler.sharding-strategy": "shuffle-sharding", + "-ruler-storage.users-scanner.strategy": "user_index", + "-ruler-storage.users-scanner.user-index.cleanup-interval": "15s", + "-ruler.tenant-shard-size": "1", + // Since we're not going to run any rule, we don't need the + // store-gateway to be configured to a valid address. + "-querier.store-gateway-addresses": "localhost:12345", + // Enable the bucket index so we can skip the initial bucket scan. 
+ "-blocks-storage.bucket-store.bucket-index.enabled": "true", + "-ruler.poll-interval": "2s", + "-log.level": "info", + }, + ) + + ruler := e2ecortex.NewRuler( + "ruler", + consul.NetworkHTTPEndpoint(), + rulerFlags, + "", + ) + + require.NoError(t, s.StartAndWaitReady(ruler)) + + // Create a client with the ruler address configured + c, err := e2ecortex.NewClient("", "", "", ruler.HTTPEndpoint(), "user-1") + require.NoError(t, err) + + ruleGroup := createTestRuleGroup(t) + ns := "ns" + + // Set the rule group into the ruler + require.NoError(t, c.SetRuleGroup(ruleGroup, ns)) + + // To make sure user index file is updated/scanned + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.Greater(float64(0)), []string{"cortex_user_index_last_successful_update_timestamp_seconds"}), + e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "component", "ruler")), + ) + + require.NoError(t, ruler.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(float64(1)), []string{"cortex_user_index_scan_succeeded_total"}), + e2e.WithLabelMatchers(labels.MustNewMatcher(labels.MatchEqual, "component", "ruler")), + ) +} + func TestRulerAPISingleBinary(t *testing.T) { s, err := e2e.NewScenario(networkName) require.NoError(t, err) diff --git a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go index 7a2d3dad2bd..989df29ebe0 100644 --- a/pkg/alertmanager/alertstore/bucketclient/bucket_client.go +++ b/pkg/alertmanager/alertstore/bucketclient/bucket_client.go @@ -3,6 +3,7 @@ package bucketclient import ( "bytes" "context" + "fmt" "io" "strings" "sync" @@ -10,14 +11,15 @@ import ( "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" - - "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/thanos-io/thanos/pkg/extprom" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" 
"github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/runutil" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -45,27 +47,54 @@ type BucketAlertStore struct { amBucket objstore.Bucket cfgProvider bucket.TenantConfigProvider logger log.Logger + + usersScanner users.Scanner + userIndexUpdater *users.UserIndexUpdater } -func NewBucketAlertStore(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *BucketAlertStore { - return &BucketAlertStore{ - alertsBucket: bucket.NewPrefixedBucketClient(bkt, alertsPrefix), - amBucket: bucket.NewPrefixedBucketClient(bkt, alertmanagerPrefix), - cfgProvider: cfgProvider, - logger: logger, +func NewBucketAlertStore(bkt objstore.InstrumentedBucket, userScannerCfg users.UsersScannerConfig, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (*BucketAlertStore, error) { + alertBucket := bucket.NewPrefixedBucketClient(bkt, alertsPrefix) + + regWithComponent := extprom.WrapRegistererWith(prometheus.Labels{"component": "alertmanager"}, reg) + usersScanner, err := users.NewScanner(userScannerCfg, alertBucket, logger, regWithComponent) + if err != nil { + return nil, errors.Wrap(err, "unable to initialize alertmanager users scanner") } + + var userIndexUpdater *users.UserIndexUpdater + if userScannerCfg.Strategy == users.UserScanStrategyUserIndex { + // We hardcode strategy to be list so can ignore error. 
+ baseScanner, _ := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, + }, alertBucket, logger, regWithComponent) + userIndexUpdater = users.NewUserIndexUpdater(alertBucket, userScannerCfg.CleanUpInterval, baseScanner, regWithComponent) + } + + return &BucketAlertStore{ + alertsBucket: alertBucket, + amBucket: bucket.NewPrefixedBucketClient(bkt, alertmanagerPrefix), + cfgProvider: cfgProvider, + logger: logger, + usersScanner: usersScanner, + userIndexUpdater: userIndexUpdater, + }, nil +} + +// GetUserIndexUpdater implements alertstore.AlertStore. +func (s *BucketAlertStore) GetUserIndexUpdater() *users.UserIndexUpdater { + return s.userIndexUpdater } // ListAllUsers implements alertstore.AlertStore. func (s *BucketAlertStore) ListAllUsers(ctx context.Context) ([]string, error) { - var userIDs []string - - err := s.alertsBucket.Iter(ctx, "", func(key string) error { - userIDs = append(userIDs, key) - return nil - }) - - return userIDs, err + active, deleting, _, err := s.usersScanner.ScanUsers(ctx) + if err != nil { + return nil, fmt.Errorf("unable to list users in alertmanager store bucket: %w", err) + } + userIDs := make([]string, 0, len(active)+len(deleting)) + userIDs = append(userIDs, active...) + userIDs = append(userIDs, deleting...) + return userIDs, nil } // GetAlertConfigs implements alertstore.AlertStore. 
@@ -217,5 +246,5 @@ func (s *BucketAlertStore) getUserBucket(userID string) objstore.Bucket { func (s *BucketAlertStore) getAlertmanagerUserBucket(userID string) objstore.Bucket { uBucket := bucket.NewUserBucketClient(userID, s.amBucket, s.cfgProvider) - return uBucket.WithExpectedErrs(tsdb.IsOneOfTheExpectedErrors(uBucket.IsAccessDeniedErr, uBucket.IsObjNotFoundErr)) + return uBucket.WithExpectedErrs(bucket.IsOneOfTheExpectedErrors(uBucket.IsAccessDeniedErr, uBucket.IsObjNotFoundErr)) } diff --git a/pkg/alertmanager/alertstore/config.go b/pkg/alertmanager/alertstore/config.go index 5d32e6dd9e1..9fb25d7578f 100644 --- a/pkg/alertmanager/alertstore/config.go +++ b/pkg/alertmanager/alertstore/config.go @@ -8,13 +8,15 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local" "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/storage/bucket" + "github.com/cortexproject/cortex/pkg/util/users" ) -// Config configures a the alertmanager storage backend. +// Config configures the alertmanager storage backend. type Config struct { bucket.Config `yaml:",inline"` - ConfigDB client.Config `yaml:"configdb"` - Local local.StoreConfig `yaml:"local"` + ConfigDB client.Config `yaml:"configdb"` + Local local.StoreConfig `yaml:"local"` + UsersScanner users.UsersScannerConfig `yaml:"users_scanner"` } // RegisterFlags registers the backend storage config. @@ -25,6 +27,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f) cfg.Local.RegisterFlagsWithPrefix(prefix, f) cfg.RegisterFlagsWithPrefix(prefix, f) + cfg.UsersScanner.RegisterFlagsWithPrefix(prefix, f) } // IsFullStateSupported returns if the given configuration supports access to FullState objects. 
diff --git a/pkg/alertmanager/alertstore/configdb/store.go b/pkg/alertmanager/alertstore/configdb/store.go index 880af40c072..b7eeb918b00 100644 --- a/pkg/alertmanager/alertstore/configdb/store.go +++ b/pkg/alertmanager/alertstore/configdb/store.go @@ -7,6 +7,7 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/configs/userconfig" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -34,6 +35,11 @@ func NewStore(c client.Client) *Store { } } +// GetUserIndexUpdater implements alertstore.AlertStore. +func (c *Store) GetUserIndexUpdater() *users.UserIndexUpdater { + return nil +} + // ListAllUsers implements alertstore.AlertStore. func (c *Store) ListAllUsers(ctx context.Context) ([]string, error) { configs, err := c.reloadConfigs(ctx) diff --git a/pkg/alertmanager/alertstore/local/store.go b/pkg/alertmanager/alertstore/local/store.go index 3d619dbad3a..3d224319581 100644 --- a/pkg/alertmanager/alertstore/local/store.go +++ b/pkg/alertmanager/alertstore/local/store.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/alertmanager/config" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -43,6 +44,11 @@ func NewStore(cfg StoreConfig) (*Store, error) { return &Store{cfg}, nil } +// GetUserIndexUpdater implements alertstore.AlertStore. +func (f *Store) GetUserIndexUpdater() *users.UserIndexUpdater { + return nil +} + // ListAllUsers implements alertstore.AlertStore. 
func (f *Store) ListAllUsers(_ context.Context) ([]string, error) { configs, err := f.reloadConfigs() diff --git a/pkg/alertmanager/alertstore/store.go b/pkg/alertmanager/alertstore/store.go index 9b02d11cd16..12d2b5877f1 100644 --- a/pkg/alertmanager/alertstore/store.go +++ b/pkg/alertmanager/alertstore/store.go @@ -2,9 +2,12 @@ package alertstore import ( "context" + "fmt" + "io" "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient" @@ -12,6 +15,11 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/local" "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/storage/bucket" + "github.com/cortexproject/cortex/pkg/util/users" +) + +var ( + errAccessDenied = fmt.Errorf("access denied") ) // AlertStore stores and configures users rule configs @@ -46,6 +54,9 @@ type AlertStore interface { // DeleteFullState deletes the alertmanager state for an user. // If state for the user doesn't exist, no error is reported. DeleteFullState(ctx context.Context, user string) error + + // GetUserIndexUpdater is getter for UserIndexUpdater + GetUserIndexUpdater() *users.UserIndexUpdater } // NewAlertStore returns a alertmanager store backend client based on the provided cfg. 
@@ -67,5 +78,29 @@ func NewAlertStore(ctx context.Context, cfg Config, cfgProvider bucket.TenantCon return nil, err } - return bucketclient.NewBucketAlertStore(bucketClient, cfgProvider, logger), nil + return bucketclient.NewBucketAlertStore(bucketClient, cfg.UsersScanner, cfgProvider, logger, reg) +} + +type MockBucket struct { + objstore.Bucket + err error +} + +func (m *MockBucket) WithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.Bucket { + return m +} + +func (m *MockBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailureExpectedFunc) objstore.BucketReader { + return m +} + +func (m *MockBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + if m.err != nil { + return nil, m.err + } + return m.Bucket.Get(ctx, name) +} + +func (m *MockBucket) IsAccessDeniedErr(err error) bool { + return err == errAccessDenied } diff --git a/pkg/alertmanager/alertstore/store_test.go b/pkg/alertmanager/alertstore/store_test.go index 2796b6ed041..ed8ef5970a0 100644 --- a/pkg/alertmanager/alertstore/store_test.go +++ b/pkg/alertmanager/alertstore/store_test.go @@ -2,26 +2,22 @@ package alertstore import ( "context" - "fmt" - "io" "testing" "github.com/go-kit/log" "github.com/prometheus/alertmanager/cluster/clusterpb" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient" -) - -var ( - errAccessDenied = fmt.Errorf("access denied") + "github.com/cortexproject/cortex/pkg/util/users" ) func TestAlertStore_ListAllUsers(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *MockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", 
RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -42,11 +38,20 @@ func TestAlertStore_ListAllUsers(t *testing.T) { require.NoError(t, err) assert.ElementsMatch(t, []string{"user-1", "user-2"}, users) } + + { + // delete user-1 alertmanager config + require.NoError(t, store.DeleteAlertConfig(ctx, "user-1")) + + users, err := store.ListAllUsers(ctx) + require.NoError(t, err) + assert.ElementsMatch(t, []string{"user-2"}, users) + } }) } func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *MockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -84,7 +89,7 @@ func TestAlertStore_SetAndGetAlertConfig(t *testing.T) { } func TestStore_GetAlertConfigs(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *MockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -129,7 +134,7 @@ func TestStore_GetAlertConfigs(t *testing.T) { } func TestAlertStore_DeleteAlertConfig(t *testing.T) { - runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *mockBucket, client any) { + runForEachAlertStore(t, func(t *testing.T, store AlertStore, m *MockBucket, client any) { ctx := context.Background() user1Cfg := alertspb.AlertConfigDesc{User: "user-1", RawConfig: "content-1"} user2Cfg := alertspb.AlertConfigDesc{User: "user-2", RawConfig: "content-2"} @@ -169,10 +174,13 @@ func TestAlertStore_DeleteAlertConfig(t *testing.T) { }) } -func 
runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *mockBucket, client any)) { +func runForEachAlertStore(t *testing.T, testFn func(t *testing.T, store AlertStore, b *MockBucket, client any)) { bucketClient := objstore.NewInMemBucket() - mBucketClient := &mockBucket{Bucket: bucketClient} - bucketStore := bucketclient.NewBucketAlertStore(mBucketClient, nil, log.NewNopLogger()) + mBucketClient := &MockBucket{Bucket: bucketClient} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + bucketStore, err := bucketclient.NewBucketAlertStore(mBucketClient, usersScannerConfig, nil, log.NewNopLogger(), reg) + assert.NoError(t, err) stores := map[string]struct { store AlertStore @@ -211,8 +219,11 @@ func makeTestFullState(content string) alertspb.FullStateDesc { func TestBucketAlertStore_GetSetDeleteFullState(t *testing.T) { bucket := objstore.NewInMemBucket() - mBucketClient := &mockBucket{Bucket: bucket} - store := bucketclient.NewBucketAlertStore(mBucketClient, nil, log.NewNopLogger()) + mBucketClient := &MockBucket{Bucket: bucket} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + store, err := bucketclient.NewBucketAlertStore(mBucketClient, usersScannerConfig, nil, log.NewNopLogger(), reg) + assert.NoError(t, err) ctx := context.Background() state1 := makeTestFullState("one") @@ -291,19 +302,3 @@ func TestBucketAlertStore_GetSetDeleteFullState(t *testing.T) { require.NoError(t, store.DeleteFullState(ctx, "user-1")) } } - -type mockBucket struct { - objstore.Bucket - err error -} - -func (m *mockBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - if m.err != nil { - return nil, m.err - } - return m.Bucket.Get(ctx, name) -} - -func (m *mockBucket) IsAccessDeniedErr(err error) bool { - return err == errAccessDenied -} diff --git a/pkg/alertmanager/api.go 
b/pkg/alertmanager/api.go index cbac5bd89c9..e4b7dd8e579 100644 --- a/pkg/alertmanager/api.go +++ b/pkg/alertmanager/api.go @@ -18,10 +18,10 @@ import ( "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -67,7 +67,7 @@ type UserConfig struct { func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), am.logger) - userID, err := tenant.TenantID(r.Context()) + userID, err := users.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) @@ -107,7 +107,7 @@ func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http. func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), am.logger) - userID, err := tenant.TenantID(r.Context()) + userID, err := users.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) @@ -167,7 +167,7 @@ func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http. // Note that if no config exists for a user, StatusOK is returned. 
func (am *MultitenantAlertmanager) DeleteUserConfig(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), am.logger) - userID, err := tenant.TenantID(r.Context()) + userID, err := users.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) http.Error(w, fmt.Sprintf("%s: %s", errNoOrgID, err.Error()), http.StatusUnauthorized) diff --git a/pkg/alertmanager/api_test.go b/pkg/alertmanager/api_test.go index 8c0a097d84c..af570e1dd22 100644 --- a/pkg/alertmanager/api_test.go +++ b/pkg/alertmanager/api_test.go @@ -22,10 +22,12 @@ import ( "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/alertmanager/alertspb" + "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore/bucketclient" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestAMConfigValidationAPI(t *testing.T) { @@ -706,8 +708,10 @@ alertmanager_config: | } limits := &mockAlertManagerLimits{} + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) am := &MultitenantAlertmanager{ - store: prepareInMemoryAlertStore(), + store: store, logger: util_log.Logger, limits: limits, } @@ -739,7 +743,12 @@ alertmanager_config: | func TestMultitenantAlertmanager_DeleteUserConfig(t *testing.T) { storage := objstore.NewInMemBucket() - alertStore := bucketclient.NewBucketAlertStore(storage, nil, log.NewNopLogger()) + bkt := &alertstore.MockBucket{Bucket: storage} + + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + alertStore, err := bucketclient.NewBucketAlertStore(bkt, usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) am := &MultitenantAlertmanager{ store: alertStore, @@ -823,7 +832,12 @@ 
receivers: } storage := objstore.NewInMemBucket() - alertStore := bucketclient.NewBucketAlertStore(storage, nil, log.NewNopLogger()) + bkt := &alertstore.MockBucket{Bucket: storage} + + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + alertStore, err := bucketclient.NewBucketAlertStore(bkt, usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) for u, cfg := range testCases { err := alertStore.SetAlertConfig(context.Background(), alertspb.AlertConfigDesc{ @@ -834,11 +848,10 @@ receivers: } externalURL := flagext.URLValue{} - err := externalURL.Set("http://localhost:8080/alertmanager") + err = externalURL.Set("http://localhost:8080/alertmanager") require.NoError(t, err) // Create the Multitenant Alertmanager. - reg := prometheus.NewPedanticRegistry() cfg := mockAlertmanagerConfig(t) am, err := createMultitenantAlertmanager(cfg, nil, nil, alertStore, nil, nil, log.NewNopLogger(), reg) require.NoError(t, err) diff --git a/pkg/alertmanager/distributor.go b/pkg/alertmanager/distributor.go index 68be1e24991..e7063d62e47 100644 --- a/pkg/alertmanager/distributor.go +++ b/pkg/alertmanager/distributor.go @@ -2,7 +2,6 @@ package alertmanager import ( "context" - "hash/fnv" "io" "math/rand" "net/http" @@ -21,10 +20,10 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/merger" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) // Distributor forwards requests to individual alertmanagers. @@ -97,11 +96,11 @@ func (d *Distributor) isQuorumReadPath(p string) (bool, merger.Merger) { // In case of reads, it proxies the request to one of the alertmanagers. 
// DistributeRequest assumes that the caller has verified IsPathSupported returns // true for the route. -func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request, allowedTenants *util.AllowedTenants) { +func (d *Distributor) DistributeRequest(w http.ResponseWriter, r *http.Request, allowedTenants *users.AllowedTenants) { d.requestsInFlight.Add(1) defer d.requestsInFlight.Done() - userID, err := tenant.TenantID(r.Context()) + userID, err := users.TenantID(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -161,7 +160,7 @@ func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Req var responses []*httpgrpc.HTTPResponse var responsesMtx sync.Mutex grpcHeaders := httpToHttpgrpcHeaders(r.Header) - err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, nil, []uint32{shardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error { + err = ring.DoBatch(r.Context(), RingOp, d.alertmanagerRing, nil, []uint32{users.ShardByUser(userID)}, func(am ring.InstanceDesc, _ []int) error { // Use a background context to make sure all alertmanagers get the request even if we return early. 
localCtx := opentracing.ContextWithSpan(user.InjectOrgID(context.Background(), userID), opentracing.SpanFromContext(r.Context())) sp, localCtx := opentracing.StartSpanFromContext(localCtx, "Distributor.doQuorum") @@ -207,7 +206,7 @@ func (d *Distributor) doQuorum(userID string, w http.ResponseWriter, r *http.Req } func (d *Distributor) doUnary(userID string, w http.ResponseWriter, r *http.Request, logger log.Logger) { - key := shardByUser(userID) + key := users.ShardByUser(userID) replicationSet, err := d.alertmanagerRing.Get(key, RingOp, nil, nil, nil) if err != nil { level.Error(logger).Log("msg", "failed to get replication set from the ring", "err", err) @@ -299,13 +298,6 @@ func (d *Distributor) doRequest(ctx context.Context, am ring.InstanceDesc, req * return amClient.HandleRequest(ctx, req) } -func shardByUser(userID string) uint32 { - ringHasher := fnv.New32a() - // Hasher never returns err. - _, _ = ringHasher.Write([]byte(userID)) - return ringHasher.Sum32() -} - func httpToHttpgrpcHeaders(hs http.Header) []*httpgrpc.Header { result := make([]*httpgrpc.Header, 0, len(hs)) for k, vs := range hs { diff --git a/pkg/alertmanager/distributor_test.go b/pkg/alertmanager/distributor_test.go index fed453c3a89..fb0dd42ace7 100644 --- a/pkg/alertmanager/distributor_test.go +++ b/pkg/alertmanager/distributor_test.go @@ -27,11 +27,11 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/consul" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestDistributor_DistributeRequest(t *testing.T) { @@ -262,9 +262,9 @@ func TestDistributor_DistributeRequest(t *testing.T) { req.Method = http.MethodDelete } req.RequestURI = url - 
var allowedTenants *util.AllowedTenants + var allowedTenants *users.AllowedTenants if c.isTenantDisabled { - allowedTenants = util.NewAllowedTenants(nil, []string{"1"}) + allowedTenants = users.NewAllowedTenants(nil, []string{"1"}) } w := httptest.NewRecorder() diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 1a3e2b3c078..4bd74630c45 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -33,12 +33,12 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -91,6 +91,8 @@ type MultitenantAlertmanagerConfig struct { EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` + + CleanUpInterval time.Duration `yaml:"-"` } type ClusterConfig struct { @@ -284,7 +286,7 @@ type MultitenantAlertmanager struct { limits Limits - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants registry prometheus.Registerer ringCheckErrors prometheus.Counter @@ -292,6 +294,8 @@ type MultitenantAlertmanager struct { tenantsDiscovered prometheus.Gauge syncTotal *prometheus.CounterVec syncFailures *prometheus.CounterVec + + userIndexUpdater *users.UserIndexUpdater } // NewMultitenantAlertmanager creates a new MultitenantAlertmanager. 
@@ -374,10 +378,11 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC multitenantMetrics: newMultitenantAlertmanagerMetrics(registerer), peer: peer, store: store, + userIndexUpdater: store.GetUserIndexUpdater(), logger: log.With(logger, "component", "MultiTenantAlertmanager"), registry: registerer, limits: limits, - allowedTenants: util.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), + allowedTenants: users.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), ringCheckErrors: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_alertmanager_ring_check_errors_total", Help: "Number of errors that have occurred when checking the ring for ownership.", @@ -667,6 +672,10 @@ func (am *MultitenantAlertmanager) run(ctx context.Context) error { ringTickerChan = ringTicker.C } + if am.cfg.ShardingEnabled && am.userIndexUpdater != nil { + go am.userIndexUpdateLoop(ctx) + } + for { select { case <-ctx.Done(): @@ -693,6 +702,32 @@ func (am *MultitenantAlertmanager) run(ctx context.Context) error { } } +func (am *MultitenantAlertmanager) userIndexUpdateLoop(ctx context.Context) { + // Hardcode ID to check which alertmanager owns updating user index. + userID := users.UserIndexCompressedFilename + // Align with clean up interval. + ticker := time.NewTicker(util.DurationWithJitter(am.userIndexUpdater.GetCleanUpInterval(), 0.1)) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + level.Error(am.logger).Log("msg", "context timeout, exit user index update loop", "err", ctx.Err()) + return + case <-ticker.C: + owned := am.isUserOwned(userID) + if !owned { + continue + } + if err := am.userIndexUpdater.UpdateUserIndex(ctx); err != nil { + level.Error(am.logger).Log("msg", "failed to update user index", "err", err) + // Wait for next interval. Worst case, the user index scanner will fall back to list strategy.
+ continue + } + } + } +} + func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncReason string) error { level.Info(am.logger).Log("msg", "synchronizing alertmanager configs for users") am.syncTotal.WithLabelValues(syncReason).Inc() @@ -795,7 +830,7 @@ func (am *MultitenantAlertmanager) isUserOwned(userID string) bool { return true } - alertmanagers, err := am.ring.Get(shardByUser(userID), SyncRingOp, nil, nil, nil) + alertmanagers, err := am.ring.Get(users.ShardByUser(userID), SyncRingOp, nil, nil, nil) if err != nil { am.ringCheckErrors.Inc() level.Error(am.logger).Log("msg", "failed to load alertmanager configuration", "user", userID, "err", err) @@ -1005,7 +1040,7 @@ func (am *MultitenantAlertmanager) GetPositionForUser(userID string) int { return 0 } - set, err := am.ring.Get(shardByUser(userID), RingOp, nil, nil, nil) + set, err := am.ring.Get(users.ShardByUser(userID), RingOp, nil, nil, nil) if err != nil { level.Error(am.logger).Log("msg", "unable to read the ring while trying to determine the alertmanager position", "err", err) // If we're unable to determine the position, we don't want a tenant to miss out on the notification - instead, @@ -1048,7 +1083,7 @@ func (am *MultitenantAlertmanager) HandleRequest(ctx context.Context, in *httpgr // serveRequest serves the Alertmanager's web UI and API. 
func (am *MultitenantAlertmanager) serveRequest(w http.ResponseWriter, req *http.Request) { - userID, err := tenant.TenantID(req.Context()) + userID, err := users.TenantID(req.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -1106,7 +1141,7 @@ func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, us level.Debug(am.logger).Log("msg", "message received for replication", "user", userID, "key", part.Key) selfAddress := am.ringLifecycler.GetInstanceAddr() - err := ring.DoBatch(ctx, RingOp, am.ring, nil, []uint32{shardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error { + err := ring.DoBatch(ctx, RingOp, am.ring, nil, []uint32{users.ShardByUser(userID)}, func(desc ring.InstanceDesc, _ []int) error { if desc.GetAddr() == selfAddress { return nil } @@ -1137,7 +1172,7 @@ func (am *MultitenantAlertmanager) ReplicateStateForUser(ctx context.Context, us // state from all replicas, but will consider it a success if state is obtained from at least one replica. func (am *MultitenantAlertmanager) ReadFullStateForUser(ctx context.Context, userID string) ([]*clusterpb.FullState, error) { // Only get the set of replicas which contain the specified user. - key := shardByUser(userID) + key := users.ShardByUser(userID) replicationSet, err := am.ring.Get(key, RingOp, nil, nil, nil) if err != nil { return nil, err @@ -1197,7 +1232,7 @@ func (am *MultitenantAlertmanager) ReadFullStateForUser(ctx context.Context, use // UpdateState implements the Alertmanager service. func (am *MultitenantAlertmanager) UpdateState(ctx context.Context, part *clusterpb.Part) (*alertmanagerpb.UpdateStateResponse, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -1307,7 +1342,7 @@ func (am *MultitenantAlertmanager) getPerUserDirectories() map[string]string { // UpdateState implements the Alertmanager service. 
func (am *MultitenantAlertmanager) ReadState(ctx context.Context, req *alertmanagerpb.ReadStateRequest) (*alertmanagerpb.ReadStateResponse, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index fe00b3c94e4..276c4c4b52a 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -51,6 +51,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -210,7 +211,8 @@ func TestMultitenantAlertmanager_loadAndSyncConfigs(t *testing.T) { ctx := context.Background() // Run this test using a real storage client. - store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: "user1", RawConfig: simpleConfigOne, @@ -544,7 +546,8 @@ receivers: alertmanagerCfg := testData.getAlertmanagerConfig(fmt.Sprintf("http://%s", server.Listener.Addr().String())) // Store the alertmanager config in the bucket. 
- store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: userID, RawConfig: alertmanagerCfg, @@ -631,7 +634,8 @@ func TestMultitenantAlertmanager_migrateStateFilesToPerTenantDirectories(t *test user2 = "user2" ) - store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: user2, RawConfig: simpleConfigOne, @@ -685,7 +689,8 @@ func TestMultitenantAlertmanager_deleteUnusedLocalUserState(t *testing.T) { user2 = "user2" ) - store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: user2, RawConfig: simpleConfigOne, @@ -720,7 +725,8 @@ func TestMultitenantAlertmanager_deleteUnusedLocalUserState(t *testing.T) { func TestMultitenantAlertmanager_zoneAwareSharding(t *testing.T) { ctx := context.Background() - alertStore := prepareInMemoryAlertStore() + alertStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) @@ -800,7 +806,8 @@ func TestMultitenantAlertmanager_deleteUnusedRemoteUserState(t *testing.T) { user2 = "user2" ) - alertStore := prepareInMemoryAlertStore() + alertStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) @@ -908,12 +915,13 @@ func TestMultitenantAlertmanager_NoExternalURL(t *testing.T) { func TestMultitenantAlertmanager_ServeHTTP(t *testing.T) { // Run this test using a real storage client. 
- store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) amConfig := mockAlertmanagerConfig(t) externalURL := flagext.URLValue{} - err := externalURL.Set("http://localhost:8080/alertmanager") + err = externalURL.Set("http://localhost:8080/alertmanager") require.NoError(t, err) amConfig.ExternalURL = externalURL @@ -1013,10 +1021,11 @@ func TestMultitenantAlertmanager_ServeHTTPWithFallbackConfig(t *testing.T) { amConfig := mockAlertmanagerConfig(t) // Run this test using a real storage client. - store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) externalURL := flagext.URLValue{} - err := externalURL.Set("http://localhost:8080/alertmanager") + err = externalURL.Set("http://localhost:8080/alertmanager") require.NoError(t, err) fallbackCfg := ` @@ -1124,7 +1133,10 @@ func TestMultitenantAlertmanager_InitialSyncWithSharding(t *testing.T) { // Use an alert store with a mocked backend. bkt := &bucket.ClientMock{} - alertStore := bucketclient.NewBucketAlertStore(bkt, nil, log.NewNopLogger()) + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + alertStore, err := bucketclient.NewBucketAlertStore(bkt, usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) // Setup the initial instance state in the ring. if tt.existing { @@ -1151,6 +1163,7 @@ func TestMultitenantAlertmanager_InitialSyncWithSharding(t *testing.T) { require.True(t, am.ringLifecycler.IsRegistered()) require.Equal(t, ring.JOINING.String(), am.ringLifecycler.GetState().String()) }) + bkt.MockIter("alerts/__markers__", nil, nil) bkt.MockIter("alertmanager/", nil, nil) // Once successfully started, the instance should be ACTIVE in the ring. 
@@ -1307,7 +1320,8 @@ func TestMultitenantAlertmanager_PerTenantSharding(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - alertStore := prepareInMemoryAlertStore() + alertStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) var instances []*MultitenantAlertmanager var instanceIDs []string @@ -1523,7 +1537,8 @@ func TestMultitenantAlertmanager_SyncOnRingTopologyChanges(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - alertStore := prepareInMemoryAlertStore() + alertStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) reg := prometheus.NewPedanticRegistry() am, err := createMultitenantAlertmanager(amConfig, nil, nil, alertStore, ringStore, nil, log.NewNopLogger(), reg) @@ -1576,7 +1591,8 @@ func TestMultitenantAlertmanager_RingLifecyclerShouldAutoForgetUnhealthyInstance ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - alertStore := prepareInMemoryAlertStore() + alertStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) am, err := createMultitenantAlertmanager(amConfig, nil, nil, alertStore, ringStore, nil, log.NewNopLogger(), nil) require.NoError(t, err) @@ -1615,7 +1631,10 @@ func TestMultitenantAlertmanager_InitialSyncFailureWithSharding(t *testing.T) { bkt := &bucket.ClientMock{} bkt.MockIter("alerts/", nil, errors.New("failed to list alerts")) bkt.MockIter("alertmanager/", nil, nil) - store := bucketclient.NewBucketAlertStore(bkt, nil, log.NewNopLogger()) + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + store, err := bucketclient.NewBucketAlertStore(bkt, usersScannerConfig, nil, log.NewNopLogger(), reg) + 
require.NoError(t, err) am, err := createMultitenantAlertmanager(amConfig, nil, nil, store, ringStore, nil, log.NewNopLogger(), nil) require.NoError(t, err) @@ -1634,7 +1653,8 @@ func TestAlertmanager_ReplicasPosition(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - mockStore := prepareInMemoryAlertStore() + mockStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, mockStore.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: "user-1", RawConfig: simpleConfigOne, @@ -1735,10 +1755,11 @@ func TestAlertmanager_StateReplicationWithSharding(t *testing.T) { ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - mockStore := prepareInMemoryAlertStore() + mockStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) clientPool := newPassthroughAlertmanagerClientPool() externalURL := flagext.URLValue{} - err := externalURL.Set("http://localhost:8080/alertmanager") + err = externalURL.Set("http://localhost:8080/alertmanager") require.NoError(t, err) var instances []*MultitenantAlertmanager @@ -1935,10 +1956,11 @@ func TestAlertmanager_StateReplicationWithSharding_InitialSyncFromPeers(t *testi ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - mockStore := prepareInMemoryAlertStore() + mockStore, err := prepareInMemoryAlertStore() + require.NoError(t, err) clientPool := newPassthroughAlertmanagerClientPool() externalURL := flagext.URLValue{} - err := externalURL.Set("http://localhost:8080/alertmanager") + err = externalURL.Set("http://localhost:8080/alertmanager") require.NoError(t, err) var instances []*MultitenantAlertmanager @@ -2108,8 +2130,12 @@ func TestAlertmanager_StateReplicationWithSharding_InitialSyncFromPeers(t *testi } // 
prepareInMemoryAlertStore builds and returns an in-memory alert store. -func prepareInMemoryAlertStore() alertstore.AlertStore { - return bucketclient.NewBucketAlertStore(objstore.NewInMemBucket(), nil, log.NewNopLogger()) +func prepareInMemoryAlertStore() (alertstore.AlertStore, error) { + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + bucket := objstore.NewInMemBucket() + mBucketClient := &alertstore.MockBucket{Bucket: bucket} + return bucketclient.NewBucketAlertStore(mBucketClient, usersScannerConfig, nil, log.NewNopLogger(), reg) } func prepareUserDir(t *testing.T, storeDir string, user string) (userDir string, templateDir string) { @@ -2196,7 +2222,8 @@ receivers: ` // Run this test using a real storage client. - store := prepareInMemoryAlertStore() + store, err := prepareInMemoryAlertStore() + require.NoError(t, err) require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ User: "user", RawConfig: config, diff --git a/pkg/compactor/block_visit_marker_test.go b/pkg/compactor/block_visit_marker_test.go index f1dc066aabe..46b444b0947 100644 --- a/pkg/compactor/block_visit_marker_test.go +++ b/pkg/compactor/block_visit_marker_test.go @@ -13,7 +13,7 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestMarkBlocksVisited(t *testing.T) { diff --git a/pkg/compactor/blocks_cleaner.go b/pkg/compactor/blocks_cleaner.go index 8e1e9a60551..ae03711fad7 100644 --- a/pkg/compactor/blocks_cleaner.go +++ b/pkg/compactor/blocks_cleaner.go @@ -20,13 +20,12 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_parquet "github.com/cortexproject/cortex/pkg/storage/parquet" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" 
"github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -536,7 +535,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } level.Info(userLogger).Log("msg", "completed deleting blocks for tenant marked for deletion", "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds()) - mark, err := cortex_tsdb.ReadTenantDeletionMark(ctx, c.bucketClient, userID) + mark, err := users.ReadTenantDeletionMark(ctx, c.bucketClient, userID, userLogger) if err != nil { return errors.Wrap(err, "failed to read tenant deletion mark") } @@ -549,7 +548,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog if deletedBlocks.Load() > 0 || mark.FinishedTime == 0 { level.Info(userLogger).Log("msg", "updating finished time in tenant deletion mark") mark.FinishedTime = time.Now().Unix() - return errors.Wrap(cortex_tsdb.WriteTenantDeletionMark(ctx, c.bucketClient, userID, mark), "failed to update tenant deletion mark") + return errors.Wrap(users.WriteTenantDeletionMark(ctx, c.bucketClient, userID, mark), "failed to update tenant deletion mark") } if time.Since(time.Unix(mark.FinishedTime, 0)) < c.cfg.TenantCleanupDelay { return nil @@ -567,7 +566,7 @@ func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userLog } else if deleted > 0 { level.Info(userLogger).Log("msg", "deleted marker files for tenant marked for deletion", "count", deleted, "duration", time.Since(begin), "duration_ms", time.Since(begin).Milliseconds()) } - if err := cortex_tsdb.DeleteTenantDeletionMark(ctx, c.bucketClient, userID); err != nil { + if err := users.DeleteTenantDeletionMark(ctx, 
c.bucketClient, userID); err != nil { return errors.Wrap(err, "failed to delete tenant deletion mark") } return nil diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 9b13d7c1b91..a7f7a0b04f9 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -23,11 +23,11 @@ import ( "github.com/cortexproject/cortex/pkg/storage/parquet" "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" ) type testBlocksCleanerOptions struct { @@ -84,8 +84,8 @@ func TestBlockCleaner_KeyPermissionDenied(t *testing.T) { logger := log.NewNopLogger() reg := prometheus.NewRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, mbucket, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -163,15 +163,15 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions createDeletionMark(t, bucketClient, "user-2", block7, now.Add(-deletionDelay).Add(-time.Hour)) // Block reached the deletion threshold. // Blocks for user-3, tenant marked for deletion. 
- require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-3", tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, users.WriteTenantDeletionMark(context.Background(), bucketClient, "user-3", users.NewTenantDeletionMark(time.Now()))) block9 := createTSDBBlock(t, bucketClient, "user-3", 10, 30, nil) block10 := createTSDBBlock(t, bucketClient, "user-3", 30, 50, nil) createParquetMarker(t, bucketClient, "user-3", block10) // User-4 with no more blocks, but couple of mark and debug files. Should be fully deleted. - user4Mark := tsdb.NewTenantDeletionMark(time.Now()) + user4Mark := users.NewTenantDeletionMark(time.Now()) user4Mark.FinishedTime = time.Now().Unix() - 60 // Set to check final user cleanup. - require.NoError(t, tsdb.WriteTenantDeletionMark(context.Background(), bucketClient, "user-4", user4Mark)) + require.NoError(t, users.WriteTenantDeletionMark(context.Background(), bucketClient, "user-4", user4Mark)) user4DebugMetaFile := path.Join("user-4", block.DebugMetas, "meta.json") require.NoError(t, bucketClient.Upload(context.Background(), user4DebugMetaFile, strings.NewReader("some random content here"))) @@ -203,8 +203,8 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions reg := prometheus.NewPedanticRegistry() logger := log.NewNopLogger() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -269,7 +269,7 @@ func testBlocksCleanerWithOptions(t *testing.T, options testBlocksCleanerOptions {"user-3", true}, {"user-4", options.user4FilesExist}, } { - exists, err := tsdb.TenantDeletionMarkExists(ctx, bucketClient, tc.user) + exists, err := users.TenantDeletionMarkExists(ctx, bucketClient, tc.user) require.NoError(t, err) assert.Equal(t, 
tc.expectedExists, exists, tc.user) } @@ -397,8 +397,8 @@ func TestBlocksCleaner_ShouldContinueOnBlockDeletionFailure(t *testing.T) { logger := log.NewNopLogger() reg := prometheus.NewRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -467,8 +467,8 @@ func TestBlocksCleaner_ShouldRebuildBucketIndexOnCorruptedOne(t *testing.T) { logger := log.NewNopLogger() reg := prometheus.NewRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -530,8 +530,8 @@ func TestBlocksCleaner_ShouldRemoveMetricsForTenantsNotBelongingAnymoreToTheShar ctx := context.Background() logger := log.NewNopLogger() reg := prometheus.NewRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -567,8 +567,8 @@ func TestBlocksCleaner_ShouldRemoveMetricsForTenantsNotBelongingAnymoreToTheShar )) // Override the users scanner to reconfigure it to only return a subset of users. 
- cleaner.usersScanner, err = users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + cleaner.usersScanner, err = users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cleaner.usersScanner = users.NewShardedScanner(cleaner.usersScanner, func(userID string) (bool, error) { return userID == "user-1", nil }, logger) @@ -680,8 +680,8 @@ func TestBlocksCleaner_ShouldRemoveBlocksOutsideRetentionPeriod(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() reg := prometheus.NewPedanticRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -913,8 +913,8 @@ func TestBlocksCleaner_CleanPartitionedGroupInfo(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() reg := prometheus.NewPedanticRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -988,8 +988,8 @@ func TestBlocksCleaner_DeleteEmptyBucketIndex(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() reg := prometheus.NewPedanticRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, reg) require.NoError(t, err) cfgProvider := newMockConfigProvider() @@ -1140,8 +1140,8 @@ func TestBlocksCleaner_EmitUserMetrics(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() 
registry := prometheus.NewPedanticRegistry() - scanner, err := users.NewScanner(tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, registry) require.NoError(t, err) cfgProvider := newMockConfigProvider() diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 65fa95fea07..e05b61f5e82 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -34,12 +34,12 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/backoff" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -403,7 +403,7 @@ type Compactor struct { logger log.Logger parentLogger log.Logger registerer prometheus.Registerer - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants limits *validation.Overrides // Functions that creates bucket client, grouper, planner and compactor using the context. 
@@ -541,7 +541,7 @@ func newCompactor( blocksCompactorFactory: blocksCompactorFactory, blockDeletableCheckerFactory: blockDeletableCheckerFactory, compactionLifecycleCallbackFactory: compactionLifecycleCallbackFactory, - allowedTenants: util.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants), + allowedTenants: users.NewAllowedTenants(compactorCfg.EnabledTenants, compactorCfg.DisabledTenants), CompactorStartDurationSeconds: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ Name: "cortex_compactor_start_duration_seconds", @@ -757,12 +757,12 @@ func (c *Compactor) starting(ctx context.Context) error { // If sharding is disabled, there is no need to have every compactor to run the user index updater // as it will be the same to fallback to list strategy. - if c.compactorCfg.ShardingEnabled && c.storageCfg.UsersScanner.Strategy == cortex_tsdb.UserScanStrategyUserIndex { + if c.compactorCfg.ShardingEnabled && c.storageCfg.UsersScanner.Strategy == users.UserScanStrategyUserIndex { // We hardcode strategy to be list so can ignore error. 
- baseScanner, _ := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + baseScanner, _ := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, c.bucketClient, c.logger, c.registerer) - c.userIndexUpdater = users.NewUserIndexUpdater(c.bucketClient, baseScanner, c.registerer) + c.userIndexUpdater = users.NewUserIndexUpdater(c.bucketClient, c.storageCfg.UsersScanner.CleanUpInterval, baseScanner, extprom.WrapRegistererWith(prometheus.Labels{"component": "compactor"}, c.registerer)) } return nil @@ -846,25 +846,25 @@ func (c *Compactor) compactUsers(ctx context.Context) { }() level.Info(c.logger).Log("msg", "discovering users from bucket") - users, err := c.discoverUsersWithRetries(ctx) + userIDs, err := c.discoverUsersWithRetries(ctx) if err != nil { level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err) return } - level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users)) - c.CompactionRunDiscoveredTenants.Set(float64(len(users))) + level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(userIDs)) + c.CompactionRunDiscoveredTenants.Set(float64(len(userIDs))) // When starting multiple compactor replicas nearly at the same time, running in a cluster with // a large number of tenants, we may end up in a situation where the 1st user is compacted by // multiple replicas at the same time. Shuffling users helps reduce the likelihood this will happen. - rand.Shuffle(len(users), func(i, j int) { - users[i], users[j] = users[j], users[i] + rand.Shuffle(len(userIDs), func(i, j int) { + userIDs[i], userIDs[j] = userIDs[j], userIDs[i] }) // Keep track of users owned by this shard, so that we can delete the local files for all other users. ownedUsers := map[string]struct{}{} - for _, userID := range users { + for _, userID := range userIDs { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). 
if ctx.Err() != nil { interrupted = true @@ -894,7 +894,7 @@ func (c *Compactor) compactUsers(ctx context.Context) { ownedUsers[userID] = struct{}{} - if markedForDeletion, err := cortex_tsdb.TenantDeletionMarkExists(ctx, c.bucketClient, userID); err != nil { + if markedForDeletion, err := users.TenantDeletionMarkExists(ctx, c.bucketClient, userID); err != nil { c.CompactionRunSkippedTenants.Inc() level.Warn(c.logger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err) continue @@ -1202,7 +1202,7 @@ func (c *Compactor) userIndexUpdateLoop(ctx context.Context) { // Hardcode ID to check which compactor owns updating user index. userID := users.UserIndexCompressedFilename // Align with clean up interval. - ticker := time.NewTicker(util.DurationWithJitter(c.compactorCfg.CleanupInterval, 0.1)) + ticker := time.NewTicker(util.DurationWithJitter(c.storageCfg.UsersScanner.CleanUpInterval, 0.1)) defer ticker.Stop() for { diff --git a/pkg/compactor/compactor_paritioning_test.go b/pkg/compactor/compactor_paritioning_test.go index 593e94d2aec..6b32824f6b4 100644 --- a/pkg/compactor/compactor_paritioning_test.go +++ b/pkg/compactor/compactor_paritioning_test.go @@ -34,13 +34,13 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_storage_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" cortex_testutil "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -206,8 +206,8 
@@ func TestPartitionCompactor_SkipCompactionWhenCmkError(t *testing.T) { bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index-sync-status.json", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockIter(userID+"/"+PartitionedGroupDirectory, nil, nil) cfg := prepareConfigForPartitioning() @@ -386,8 +386,8 @@ func TestPartitionCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASing bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) @@ -442,8 +442,8 @@ func TestPartitionCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, 
nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) @@ -498,10 +498,10 @@ func TestPartitionCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) @@ -650,8 +650,8 @@ func 
TestPartitionCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ"}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) // Block that has just been marked for deletion. It will not be deleted just yet, and it also will not be compacted. bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -774,10 +774,10 @@ func TestPartitionCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testin bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", 
"user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) @@ -868,8 +868,8 @@ func TestPartitionCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *t bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{"__markers__/user-1/"}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D"}, nil) - bucketClient.MockGet(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), `{"deletion_time": 1}`, nil) - bucketClient.MockUpload(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), nil) + bucketClient.MockGet(users.GetGlobalDeletionMarkPath("user-1"), `{"deletion_time": 1}`, nil) + bucketClient.MockUpload(users.GetGlobalDeletionMarkPath("user-1"), nil) bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) @@ -965,7 +965,7 @@ func TestPartitionCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *t } func TestPartitionCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { - bucketClient, tmpDir := cortex_storage_testutil.PrepareFilesystemBucket(t) + bucketClient, tmpDir := testutil.PrepareFilesystemBucket(t) bucketClient = bucketindex.BucketWithGlobalMarkers(bucketClient) b1 := createTSDBBlock(t, bucketClient, "user-1", 10, 20, map[string]string{"__name__": "Teste"}) @@ -1037,10 +1037,10 @@ func TestPartitionCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInst bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - 
bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) //bucketClient.MockIterWithAttributes("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) @@ -1162,8 +1162,8 @@ func TestPartitionCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEn bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) 
bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) @@ -1308,8 +1308,8 @@ func TestPartitionCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingE bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) bucketClient.MockUpload(userID+"/bucket-index-sync-status.json", nil) @@ -1420,7 +1420,7 @@ func prepareForPartitioning(t *testing.T, compactorCfg Config, bucketClient objs storageCfg := cortex_tsdb.BlocksStorageConfig{} flagext.DefaultValues(&storageCfg) storageCfg.BucketStore.BlockDiscoveryStrategy = string(cortex_tsdb.RecursiveDiscovery) - storageCfg.UsersScanner.Strategy = cortex_tsdb.UserScanStrategyUserIndex + storageCfg.UsersScanner.Strategy = users.UserScanStrategyUserIndex // Create a temporary directory for compactor data. 
compactorCfg.DataDir = t.TempDir() @@ -1676,8 +1676,8 @@ func TestPartitionCompactor_ShouldNotHangIfPlannerReturnNothing(t *testing.T) { bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) @@ -1736,8 +1736,8 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSy bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), bucket.ErrKeyPermissionDenied) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", bucket.ErrKeyPermissionDenied) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", 
bucket.ErrKeyPermissionDenied) @@ -1790,8 +1790,8 @@ func TestPartitionCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFrom bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) diff --git a/pkg/compactor/compactor_test.go b/pkg/compactor/compactor_test.go index 5724c946990..c8c617d4279 100644 --- a/pkg/compactor/compactor_test.go +++ b/pkg/compactor/compactor_test.go @@ -40,13 +40,13 @@ import ( "github.com/cortexproject/cortex/pkg/storage/parquet" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_storage_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" cortex_testutil "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -173,6 +173,7 @@ func 
TestCompactor_SkipCompactionWhenCmkError(t *testing.T) { // No user blocks stored in the bucket. bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{userID}, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D"}, nil) @@ -185,8 +186,8 @@ func TestCompactor_SkipCompactionWhenCmkError(t *testing.T) { bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index-sync-status.json", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) cfg := prepareConfig() c, _, _, logs, _ := prepare(t, cfg, bucketClient, nil) @@ -207,6 +208,7 @@ func TestCompactor_ShouldDoNothingOnNoUserBlocks(t *testing.T) { // No user blocks stored in the bucket. bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{}, nil) bucketClient.MockIter("__markers__", []string{}, nil) cfg := prepareConfig() @@ -284,6 +286,7 @@ func TestCompactor_ShouldRetryCompactionOnFailureWhileDiscoveringUsersFromBucket // Fail to iterate over the bucket while discovering users. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("__markers__", nil, errors.New("failed to iterate the bucket")) bucketClient.MockIter("", nil, errors.New("failed to iterate the bucket")) @@ -357,6 +360,7 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( userID := "test-user" bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{userID}, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter(userID+"/", []string{userID + "/01DTVP434PA9VFXSW2JKB3392D", userID + "/01FN6CDF3PNEWWRY5MPGJPE3EX", userID + "/01DTVP434PA9VFXSW2JKB3392D/meta.json", userID + "/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) @@ -364,8 +368,8 @@ func TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) @@ -414,10 +418,11 @@ func 
TestCompactor_ShouldIncrementCompactionErrorIfFailedToCompactASingleTenant( func TestCompactor_ShouldCompactAndRemoveUserFolder(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) @@ -464,12 +469,13 @@ func TestCompactor_ShouldIterateOverUsersAndRunCompaction(t *testing.T) { // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) @@ -609,11 +615,12 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForDeletion(t *testing.T) { // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ"}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) // Block that has just been marked for deletion. It will not be deleted just yet, and it also will not be compacted. bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) @@ -731,12 +738,13 @@ func TestCompactor_ShouldNotCompactBlocksMarkedForSkipCompact(t *testing.T) { // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) @@ -817,11 +825,12 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("__markers__", []string{"__markers__/user-1/"}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D"}, nil) - bucketClient.MockGet(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), `{"deletion_time": 1}`, nil) - bucketClient.MockUpload(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), nil) + bucketClient.MockGet(users.GetGlobalDeletionMarkPath("user-1"), `{"deletion_time": 1}`, nil) + bucketClient.MockUpload(users.GetGlobalDeletionMarkPath("user-1"), nil) bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) @@ -913,7 +922,7 @@ func TestCompactor_ShouldNotCompactBlocksForUsersMarkedForDeletion(t *testing.T) } func TestCompactor_ShouldSkipOutOrOrderBlocks(t *testing.T) { - bucketClient, tmpDir := cortex_storage_testutil.PrepareFilesystemBucket(t) + bucketClient, tmpDir := testutil.PrepareFilesystemBucket(t) bucketClient = bucketindex.BucketWithGlobalMarkers(bucketClient) b1 := createTSDBBlock(t, bucketClient, "user-1", 10, 20, map[string]string{"__name__": "Teste"}) @@ -981,12 +990,13 @@ func TestCompactor_ShouldCompactAllUsersOnShardingEnabledButOnlyOneInstanceRunni // Mock the bucket to contain two users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01FN6CDF3PNEWWRY5MPGJPE3EX/meta.json"}, nil) bucketClient.MockIter("user-2/", []string{"user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-2/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json", "user-2/01FN3V83ABR9992RF8WRJZ76ZQ/meta.json"}, nil) bucketClient.MockIter("user-1/markers/", nil, nil) @@ -1092,6 +1102,7 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM // Mock the bucket to contain all users, each one with one block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", userIDs, nil) bucketClient.MockIter("__markers__", []string{}, nil) for _, userID := range userIDs { @@ -1100,8 +1111,8 @@ func TestCompactor_ShouldCompactOnlyUsersOwnedByTheInstanceOnShardingEnabledAndM bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet(userID+"/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) @@ -1203,6 +1214,7 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit // Mock the bucket to contain all users, each one with five blocks, 2 sets of overlapping blocks and 1 separate block. 
bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", userIDs, nil) bucketClient.MockIter("__markers__", []string{}, nil) @@ -1238,8 +1250,8 @@ func TestCompactor_ShouldCompactOnlyShardsOwnedByTheInstanceOnShardingEnabledWit bucketClient.MockGet(userID+"/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload(userID+"/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete(userID+"/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(userID), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(userID), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(userID), false, nil) bucketClient.MockGet(userID+"/bucket-index.json.gz", "", nil) bucketClient.MockUpload(userID+"/bucket-index.json.gz", nil) bucketClient.MockUpload(userID+"/bucket-index-sync-status.json", nil) @@ -1567,7 +1579,8 @@ func prepare(t *testing.T, compactorCfg Config, bucketClient objstore.Instrument storageCfg := cortex_tsdb.BlocksStorageConfig{} flagext.DefaultValues(&storageCfg) storageCfg.BucketStore.BlockDiscoveryStrategy = string(cortex_tsdb.RecursiveDiscovery) - storageCfg.UsersScanner.Strategy = cortex_tsdb.UserScanStrategyUserIndex + storageCfg.UsersScanner.Strategy = users.UserScanStrategyUserIndex + storageCfg.UsersScanner.CleanUpInterval = 100 * time.Millisecond // Short interval for testing // Create a temporary directory for compactor data. 
compactorCfg.DataDir = t.TempDir() @@ -1855,6 +1868,7 @@ func TestCompactor_ShouldFailCompactionOnTimeout(t *testing.T) { // Mock the bucket bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("", []string{}, nil) bucketClient.MockIter("__markers__", []string{}, nil) @@ -1956,6 +1970,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSync(t *tes bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil) @@ -1963,8 +1978,8 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrDuringMetaSync(t *tes bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), bucket.ErrKeyPermissionDenied) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", bucket.ErrKeyPermissionDenied) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", 
bucket.ErrKeyPermissionDenied) @@ -2007,6 +2022,7 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFromBucket(t bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil) @@ -2014,8 +2030,8 @@ func TestCompactor_ShouldNotFailCompactionIfAccessDeniedErrReturnedFromBucket(t bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/no-compact-mark.json", "", nil) @@ -2058,6 +2074,7 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("user-1/", 
[]string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil) @@ -2065,8 +2082,8 @@ func TestCompactor_FailedWithRetriableError(t *testing.T) { bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockIter("user-1/01DTVP434PA9VFXSW2JKB3392D", nil, errors.New("test retriable error")) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) @@ -2112,6 +2129,7 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockGet(users.UserIndexCompressedFilename, "", nil) + bucketClient.MockUpload(users.UserIndexCompressedFilename, nil) bucketClient.MockIter("__markers__", []string{}, nil) bucketClient.MockIter("", []string{"user-1"}, nil) bucketClient.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ", "user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "user-1/01DTW0ZCPDDNV4BV83Q2SV4QAZ/meta.json"}, nil) @@ -2119,8 +2137,8 @@ func TestCompactor_FailedWithHaltError(t *testing.T) { bucketClient.MockGet("user-1/markers/cleaner-visit-marker.json", "", nil) bucketClient.MockUpload("user-1/markers/cleaner-visit-marker.json", nil) bucketClient.MockDelete("user-1/markers/cleaner-visit-marker.json", nil) - 
bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucketClient.MockIter("user-1/01DTVP434PA9VFXSW2JKB3392D", nil, compact.HaltError{}) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", mockBlockMetaJSON("01DTVP434PA9VFXSW2JKB3392D"), nil) bucketClient.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/deletion-mark.json", "", nil) @@ -2411,7 +2429,7 @@ func (l *mockTenantLimits) setLimits(userID string, limits *validation.Limits) { func TestCompactor_UserIndexUpdateLoop(t *testing.T) { // Prepare test dependencies - bucketClient, _ := cortex_storage_testutil.PrepareFilesystemBucket(t) + bucketClient, _ := testutil.PrepareFilesystemBucket(t) bucketClient = bucketindex.BucketWithGlobalMarkers(bucketClient) ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) @@ -2422,7 +2440,6 @@ func TestCompactor_UserIndexUpdateLoop(t *testing.T) { cfg.ShardingRing.InstanceID = "compactor-1" cfg.ShardingRing.InstanceAddr = "1.2.3.4" cfg.ShardingRing.KVStore.Mock = ringStore - cfg.CleanupInterval = 100 * time.Millisecond // Short interval for testing compactor, _, _, _, _ := prepare(t, cfg, bucketClient, &validation.Limits{}) diff --git a/pkg/compactor/partitioned_group_info_test.go b/pkg/compactor/partitioned_group_info_test.go index 815f0668a49..6e7dcc44464 100644 --- a/pkg/compactor/partitioned_group_info_test.go +++ b/pkg/compactor/partitioned_group_info_test.go @@ -15,7 +15,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func 
TestPartitionedGroupInfo(t *testing.T) { diff --git a/pkg/compactor/visit_marker_test.go b/pkg/compactor/visit_marker_test.go index a8b37011ecd..37451f1526f 100644 --- a/pkg/compactor/visit_marker_test.go +++ b/pkg/compactor/visit_marker_test.go @@ -13,7 +13,7 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/compact" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestMarkPending(t *testing.T) { diff --git a/pkg/configs/api/api.go b/pkg/configs/api/api.go index 56cf15bc943..029528ab784 100644 --- a/pkg/configs/api/api.go +++ b/pkg/configs/api/api.go @@ -22,9 +22,9 @@ import ( "github.com/cortexproject/cortex/pkg/configs/db" "github.com/cortexproject/cortex/pkg/configs/userconfig" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -109,7 +109,7 @@ func (a *API) RegisterRoutes(r *mux.Router) { // getConfig returns the request configuration. 
func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) + userID, _, err := users.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -147,7 +147,7 @@ func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) + userID, _, err := users.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -297,7 +297,7 @@ func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { } func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) + userID, _, err := users.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return @@ -319,7 +319,7 @@ func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) + userID, _, err := users.ExtractTenantIDFromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index e7575abdcee..a7fa25f3b28 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -21,9 +21,6 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" "gopkg.in/yaml.v2" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/resource" - "github.com/cortexproject/cortex/pkg/alertmanager" "github.com/cortexproject/cortex/pkg/alertmanager/alertstore" "github.com/cortexproject/cortex/pkg/api" @@ -54,17 +51,19 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storage/tsdb" 
"github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/tracing" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/fakeauth" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/grpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/process" + "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -354,7 +353,7 @@ func New(cfg Config) (*Cortex, error) { // Swap out the default resolver to support multiple tenant IDs separated by a '|' if cfg.TenantFederation.Enabled { util_log.WarnExperimentalUse("tenant-federation") - tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) } cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, diff --git a/pkg/cortex/cortex_test.go b/pkg/cortex/cortex_test.go index 6cac224319d..5f7d382ddff 100644 --- a/pkg/cortex/cortex_test.go +++ b/pkg/cortex/cortex_test.go @@ -37,6 +37,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestCortex(t *testing.T) { @@ -87,8 +88,8 @@ func TestCortex(t *testing.T) { Backend: tsdb.IndexCacheBackendInMemory, }, }, - UsersScanner: tsdb.UsersScannerConfig{ - Strategy: tsdb.UserScanStrategyList, + UsersScanner: users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, }, RulerStorage: rulestore.Config{ diff --git 
a/pkg/cortex/modules.go b/pkg/cortex/modules.go index 013dbb90834..d143f2d9b50 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -51,13 +51,13 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/grpcclient" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -299,7 +299,7 @@ func (t *Cortex) initTenantFederation() (serv services.Service, err error) { if err != nil { return nil, fmt.Errorf("failed to initialize regex resolver: %v", err) } - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) return regexResolver, nil } @@ -528,7 +528,7 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro if t.Cfg.TenantFederation.Enabled && t.Cfg.TenantFederation.RegexMatcherEnabled { // If regex matcher enabled, we use regex validator to pass regex to the querier - tenant.WithDefaultResolver(tenantfederation.NewRegexValidator()) + users.WithDefaultResolver(tenantfederation.NewRegexValidator()) } queryRangeMiddlewares, cache, err := queryrange.Middlewares( @@ -814,7 +814,7 @@ func (t *Cortex) initTenantDeletionAPI() (services.Service, error) { func (t *Cortex) initQueryScheduler() (services.Service, error) { if t.Cfg.TenantFederation.Enabled && t.Cfg.TenantFederation.RegexMatcherEnabled { // If regex matcher enabled, we use regex validator to pass regex to the querier - tenant.WithDefaultResolver(tenantfederation.NewRegexValidator()) + 
users.WithDefaultResolver(tenantfederation.NewRegexValidator()) } s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer, t.Cfg.Querier.DistributedExecEnabled) diff --git a/pkg/cortexpb/extensions.go b/pkg/cortexpb/extensions.go index e75b45e2ae9..a11b8a0e301 100644 --- a/pkg/cortexpb/extensions.go +++ b/pkg/cortexpb/extensions.go @@ -8,7 +8,7 @@ import ( "github.com/cespare/xxhash/v2" - "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util/users" ) const maxBufferSize = 1024 @@ -63,7 +63,7 @@ func (w *WriteRequest) VerifySign(ctx context.Context, signature string) (bool, } func (w *WriteRequest) Sign(ctx context.Context) (string, error) { - u, err := tenant.TenantID(ctx) + u, err := users.TenantID(ctx) if err != nil { return "", err } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 5b32804daca..25239b0b6e7 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -34,7 +34,6 @@ import ( ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/labelset" @@ -43,6 +42,7 @@ import ( util_math "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/requestmeta" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -104,7 +104,7 @@ type Distributor struct { subservices *services.Manager subservicesWatcher *services.FailureWatcher - activeUsers *util.ActiveUsersCleanupService + activeUsers *users.ActiveUsersCleanupService ingestionRate *util_math.EwmaRate inflightPushRequests 
atomic.Int64 @@ -455,7 +455,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove }) d.replicationFactor.Set(float64(ingestersRing.ReplicationFactor())) - d.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(d.cleanupInactiveUser) + d.activeUsers = users.NewActiveUsersCleanupWithDefaultValues(d.cleanupInactiveUser) subservices = append(subservices, d.ingesterPool, d.activeUsers) d.subservices, err = services.NewManager(subservices...) @@ -691,7 +691,7 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri // Push implements client.IngesterServer func (d *Distributor) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index fd50aef9d1a..41af489cebc 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -45,13 +45,13 @@ import ( ring_client "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/consul" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -1218,7 +1218,7 @@ func TestDistributor_PushHAInstances(t *testing.T) { d := ds[0] - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) assert.NoError(t, err) err = d.HATracker.CheckReplica(ctx, userID, tc.cluster, tc.acceptedReplica, time.Now()) assert.NoError(t, err) @@ -3507,7 
+3507,7 @@ func (i *mockIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opt i.metadata = map[uint32]map[cortexpb.MetricMetadata]struct{}{} } - orgid, err := tenant.TenantID(ctx) + orgid, err := users.TenantID(ctx) if err != nil { return nil, err } diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 3b44d895bc9..9835ab1c822 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -17,12 +17,12 @@ import ( "github.com/cortexproject/cortex/pkg/querier/partialdata" "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/grpcutil" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -83,7 +83,7 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, part // GetIngestersForQuery returns a replication set including all ingesters that should be queried // to fetch series matching input label matchers. func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*labels.Matcher) (ring.ReplicationSet, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return ring.ReplicationSet{}, err } @@ -114,7 +114,7 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab // GetIngestersForMetadata returns a replication set including all ingesters that should be queried // to fetch metadata (eg. label names/values or series). 
func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.ReplicationSet, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return ring.ReplicationSet{}, err } diff --git a/pkg/frontend/transport/handler.go b/pkg/frontend/transport/handler.go index f35bab20ff1..79424ec778d 100644 --- a/pkg/frontend/transport/handler.go +++ b/pkg/frontend/transport/handler.go @@ -28,12 +28,12 @@ import ( querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tenantfederation" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_api "github.com/cortexproject/cortex/pkg/util/api" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/requestmeta" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -112,7 +112,7 @@ type Handler struct { queryDataBytes *prometheus.CounterVec rejectedQueries *prometheus.CounterVec slowQueries *prometheus.CounterVec - activeUsers *util.ActiveUsersCleanupService + activeUsers *users.ActiveUsersCleanupService initSlowQueryMetric sync.Once reg prometheus.Registerer @@ -175,7 +175,7 @@ func NewHandler(cfg HandlerConfig, tenantFederationCfg tenantfederation.Config, }, []string{"reason", "source", "user"}, ) - h.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(h.cleanupMetricsForInactiveUser) + h.activeUsers = users.NewActiveUsersCleanupWithDefaultValues(h.cleanupMetricsForInactiveUser) // If cleaner stops or fail, we will simply not clean the metrics for inactive users. 
_ = h.activeUsers.StartAsync(context.Background()) } @@ -242,13 +242,13 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { queryString url.Values ) - tenantIDs, err := tenant.TenantIDs(r.Context()) + tenantIDs, err := users.TenantIDs(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } - userID := tenant.JoinTenantIDs(tenantIDs) + userID := users.JoinTenantIDs(tenantIDs) source := tripperware.GetSource(r) if f.tenantFederationCfg.Enabled { diff --git a/pkg/frontend/transport/handler_test.go b/pkg/frontend/transport/handler_test.go index b6c90a31fcc..6cd90789993 100644 --- a/pkg/frontend/transport/handler_test.go +++ b/pkg/frontend/transport/handler_test.go @@ -28,11 +28,11 @@ import ( "github.com/cortexproject/cortex/pkg/querier" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tenantfederation" - "github.com/cortexproject/cortex/pkg/tenant" util_api "github.com/cortexproject/cortex/pkg/util/api" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/requestmeta" + "github.com/cortexproject/cortex/pkg/util/users" ) type roundTripperFunc func(*http.Request) (*http.Response, error) @@ -626,7 +626,7 @@ func Test_ExtractTenantIDs(t *testing.T) { func Test_TenantFederation_MaxTenant(t *testing.T) { // set a multi tenant resolver - tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) roundTripper := roundTripperFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ diff --git a/pkg/frontend/v1/frontend.go b/pkg/frontend/v1/frontend.go index 8dfcd98ef0a..c4294aaaeac 100644 --- a/pkg/frontend/v1/frontend.go +++ b/pkg/frontend/v1/frontend.go @@ -19,12 +19,11 @@ import ( "github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb" "github.com/cortexproject/cortex/pkg/querier/stats" 
"github.com/cortexproject/cortex/pkg/scheduler/queue" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -78,7 +77,7 @@ type Frontend struct { retry *transport.Retry requestQueue *queue.RequestQueue - activeUsers *util.ActiveUsersCleanupService + activeUsers *users.ActiveUsersCleanupService // Subservices manager. subservices *services.Manager @@ -133,7 +132,7 @@ func New(cfg Config, limits Limits, log log.Logger, registerer prometheus.Regist } f.requestQueue = queue.NewRequestQueue(cfg.QuerierForgetDelay, f.queueLength, f.discardedRequests, f.limits, registerer) - f.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) + f.activeUsers = users.NewActiveUsersCleanupWithDefaultValues(f.cleanupInactiveUserMetrics) var err error f.subservices, err = services.NewManager(f.requestQueue, f.activeUsers) @@ -355,7 +354,7 @@ func getQuerierID(server frontendv1pb.Frontend_ProcessServer) (string, error) { } func (f *Frontend) queueRequest(ctx context.Context, req *request) error { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return err } @@ -367,7 +366,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { // aggregate the max queriers limit in the case of a multi tenant query maxQueriers := validation.SmallestPositiveNonZeroFloat64PerTenant(tenantIDs, f.limits.MaxQueriersPerUser) - joinedTenantID := tenant.JoinTenantIDs(tenantIDs) + joinedTenantID := users.JoinTenantIDs(tenantIDs) f.activeUsers.UpdateUserTimestamp(joinedTenantID, now) err = f.requestQueue.EnqueueRequest(joinedTenantID, req, maxQueriers, 
nil) diff --git a/pkg/frontend/v2/frontend.go b/pkg/frontend/v2/frontend.go index a8f49b11ab5..7631bf701be 100644 --- a/pkg/frontend/v2/frontend.go +++ b/pkg/frontend/v2/frontend.go @@ -22,12 +22,12 @@ import ( "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/scheduler" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) // Config for a Frontend. @@ -172,11 +172,11 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) return nil, fmt.Errorf("frontend not running: %v", s) } - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, err } - userID := tenant.JoinTenantIDs(tenantIDs) + userID := users.JoinTenantIDs(tenantIDs) // Propagate trace context in gRPC too - this will be ignored if using HTTP. tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) @@ -267,11 +267,11 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) } func (f *Frontend) QueryResult(ctx context.Context, qrReq *frontendv2pb.QueryResultRequest) (*frontendv2pb.QueryResultResponse, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, err } - userID := tenant.JoinTenantIDs(tenantIDs) + userID := users.JoinTenantIDs(tenantIDs) req := f.requests.get(qrReq.QueryID) // It is possible that some old response belonging to different user was received, if frontend has restarted. 
diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index ed8bacd45aa..95427d101de 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -8,11 +8,6 @@ import ( "net/http" "sync" - "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util/grpcclient" - "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock" - "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -22,6 +17,11 @@ import ( "go.uber.org/atomic" "google.golang.org/grpc" "google.golang.org/grpc/health/grpc_health_v1" + + "github.com/cortexproject/cortex/pkg/cortexpb" + "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock" + "github.com/cortexproject/cortex/pkg/util/users" ) var ingesterClientRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ @@ -101,7 +101,7 @@ func (c *closableHealthAndIngesterClient) PushStreamConnection(ctx context.Conte default: } - tenantID, err := tenant.TenantID(ctx) + tenantID, err := users.TenantID(ctx) if err != nil { return nil, err } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 2330fdec595..7628025cd09 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -55,7 +55,6 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/extract" @@ -66,6 +65,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" + 
"github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -658,8 +658,8 @@ type TSDBState struct { } type requestWithUsersAndCallback struct { - users *util.AllowedTenants // if nil, all tenants are allowed. - callback chan<- struct{} // when compaction/shipping is finished, this channel is closed + users *users.AllowedTenants // if nil, all tenants are allowed. + callback chan<- struct{} // when compaction/shipping is finished, this channel is closed } func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer) TSDBState { @@ -1169,7 +1169,7 @@ func (i *Ingester) Push(ctx context.Context, req *cortexpb.WriteRequest) (*corte span, ctx := opentracing.StartSpanFromContext(ctx, "Ingester.Push") defer span.Finish() - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -1678,7 +1678,7 @@ func (i *Ingester) QueryExemplars(ctx context.Context, req *client.ExemplarQuery return nil, err } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -1782,7 +1782,7 @@ func (i *Ingester) labelsValuesCommon(ctx context.Context, req *client.LabelValu return nil, cleanup, err } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, cleanup, err } @@ -1876,7 +1876,7 @@ func (i *Ingester) labelNamesCommon(ctx context.Context, req *client.LabelNamesR return nil, cleanup, err } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, cleanup, err } @@ -1981,7 +1981,7 @@ func (i *Ingester) metricsForLabelMatchersCommon(ctx context.Context, req *clien return cleanup, err } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return cleanup, err } @@ -2071,7 +2071,7 @@ func (i *Ingester) MetricsMetadata(ctx context.Context, req *client.MetricsMetad } i.stoppedMtx.RUnlock() - 
userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -2100,7 +2100,7 @@ func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) return nil, err } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -2206,7 +2206,7 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ spanlog, ctx := spanlogger.New(stream.Context(), "QueryStream") defer spanlog.Finish() - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return err } @@ -2800,7 +2800,7 @@ func (i *Ingester) shipBlocksLoop(ctx context.Context) error { } // shipBlocks runs shipping for all users. -func (i *Ingester) shipBlocks(ctx context.Context, allowed *util.AllowedTenants) { +func (i *Ingester) shipBlocks(ctx context.Context, allowed *users.AllowedTenants) { // Do not ship blocks if the ingester is PENDING or JOINING. It's // particularly important for the JOINING state because there could // be a blocks transfer in progress (from another ingester) and if we @@ -2833,7 +2833,7 @@ func (i *Ingester) shipBlocks(ctx context.Context, allowed *util.AllowedTenants) // Even if check fails with error, we don't want to repeat it too often. userDB.lastDeletionMarkCheck.Store(time.Now().Unix()) - deletionMarkExists, err := cortex_tsdb.TenantDeletionMarkExists(ctx, i.TSDBState.bucket, userID) + deletionMarkExists, err := users.TenantDeletionMarkExists(ctx, i.TSDBState.bucket, userID) if err != nil { // If we cannot check for deletion mark, we continue anyway, even though in production shipper will likely fail too. // This however simplifies unit tests, where tenant deletion check is enabled by default, but tests don't setup bucket. @@ -2917,7 +2917,7 @@ func (i *Ingester) compactionLoop(ctx context.Context) error { } // Compacts all compactable blocks. 
Force flag will force compaction even if head is not compactable yet. -func (i *Ingester) compactBlocks(ctx context.Context, force bool, allowed *util.AllowedTenants) { +func (i *Ingester) compactBlocks(ctx context.Context, force bool, allowed *users.AllowedTenants) { // Don't compact TSDB blocks while JOINING as there may be ongoing blocks transfers. // Compaction loop is not running in LEAVING state, so if we get here in LEAVING state, we're flushing blocks. if i.lifecycler != nil { @@ -3227,7 +3227,7 @@ func (i *Ingester) flushHandler(w http.ResponseWriter, r *http.Request) { tenants := r.Form[tenantParam] - allowedUsers := util.NewAllowedTenants(tenants, nil) + allowedUsers := users.NewAllowedTenants(tenants, nil) run := func() { ingCtx := i.ServiceContext() if ingCtx == nil || ingCtx.Err() != nil { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index d8313aac3e4..1756fbe7448 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -64,6 +64,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -4389,7 +4390,7 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) numObjects := len(bucket.Objects()) require.NotZero(t, numObjects) - require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, cortex_tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, users.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, users.NewTenantDeletionMark(time.Now()))) numObjects++ // For deletion marker db, err := i.getTSDB(userID) @@ -4422,7 +4423,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin bucket := objstore.NewInMemBucket() // Write tenant 
deletion mark. - require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, cortex_tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, users.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucket), userID, users.NewTenantDeletionMark(time.Now()))) i.TSDBState.bucket = bucket require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) diff --git a/pkg/parquetconverter/converter.go b/pkg/parquetconverter/converter.go index 477149705c7..261947a5bf2 100644 --- a/pkg/parquetconverter/converter.go +++ b/pkg/parquetconverter/converter.go @@ -35,12 +35,11 @@ import ( cortex_parquet "github.com/cortexproject/cortex/pkg/storage/parquet" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" cortex_errors "github.com/cortexproject/cortex/pkg/util/errors" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -193,17 +192,17 @@ func (c *Converter) running(ctx context.Context) error { return ctx.Err() case <-t.C: level.Info(c.logger).Log("msg", "start scanning users") - users, err := c.discoverUsers(ctx) + userIds, err := c.discoverUsers(ctx) if err != nil { level.Error(c.logger).Log("msg", "failed to scan users", "err", err) continue } ownedUsers := map[string]struct{}{} - rand.Shuffle(len(users), func(i, j int) { - users[i], users[j] = users[j], users[i] + rand.Shuffle(len(userIds), func(i, j int) { + userIds[i], userIds[j] = userIds[j], userIds[i] }) - for _, userID := range users { + for _, userID := range userIds { if ctx.Err() != nil { return ctx.Err() } @@ -234,7 +233,7 @@ func (c *Converter) 
running(ctx context.Context) error { continue } - if markedForDeletion, err := cortex_tsdb.TenantDeletionMarkExists(ctx, c.bkt, userID); err != nil { + if markedForDeletion, err := users.TenantDeletionMarkExists(ctx, c.bkt, userID); err != nil { level.Warn(userLogger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err) continue } else if markedForDeletion { @@ -513,7 +512,7 @@ func (c *Converter) isPermissionDeniedErr(err error) bool { } func (c *Converter) ownUser(r ring.ReadRing, userId string) (bool, error) { - if userId == tenant.GlobalMarkersDir { + if userId == users.GlobalMarkersDir { // __markers__ is reserved for global markers and no tenant should be allowed to have that name. return false, nil } diff --git a/pkg/parquetconverter/converter_test.go b/pkg/parquetconverter/converter_test.go index 81caa86c63d..0271e4ab3a3 100644 --- a/pkg/parquetconverter/converter_test.go +++ b/pkg/parquetconverter/converter_test.go @@ -33,12 +33,12 @@ import ( "github.com/cortexproject/cortex/pkg/storage/parquet" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util/concurrency" cortex_errors "github.com/cortexproject/cortex/pkg/util/errors" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -137,7 +137,7 @@ func TestConverter(t *testing.T) { require.Contains(t, syncedTenants, user) // Mark user as deleted - require.NoError(t, cortex_tsdb.WriteTenantDeletionMark(context.Background(), objstore.WithNoopInstr(bucketClient), user, cortex_tsdb.NewTenantDeletionMark(time.Now()))) + require.NoError(t, users.WriteTenantDeletionMark(context.Background(), 
objstore.WithNoopInstr(bucketClient), user, users.NewTenantDeletionMark(time.Now()))) // Should clean sync folders test.Poll(t, time.Minute, 0, func() any { @@ -190,8 +190,8 @@ func prepare(t *testing.T, cfg Config, bucketClient objstore.InstrumentedBucket, overrides := validation.NewOverrides(*limits, tenantLimits) - scanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + scanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, }, bucketClient, logger, registry) require.NoError(t, err) c := newConverter(cfg, bucketClient, storageCfg, blockRanges.ToMilliseconds(), logger, registry, overrides, scanner) diff --git a/pkg/purger/tenant_deletion_api.go b/pkg/purger/tenant_deletion_api.go index 6604f2ce8fd..bc82bef9fec 100644 --- a/pkg/purger/tenant_deletion_api.go +++ b/pkg/purger/tenant_deletion_api.go @@ -15,8 +15,8 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/users" ) type TenantDeletionAPI struct { @@ -44,7 +44,7 @@ func newTenantDeletionAPI(bkt objstore.InstrumentedBucket, cfgProvider bucket.Te func (api *TenantDeletionAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { // When Cortex is running, it uses Auth Middleware for checking X-Scope-OrgID and injecting tenant into context. // Auth Middleware sends http.StatusUnauthorized if X-Scope-OrgID is missing, so we do too here, for consistency. 
@@ -52,7 +52,7 @@ func (api *TenantDeletionAPI) DeleteTenant(w http.ResponseWriter, r *http.Reques return } - err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, cortex_tsdb.NewTenantDeletionMark(time.Now())) + err = users.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, users.NewTenantDeletionMark(time.Now())) if err != nil { level.Error(api.logger).Log("msg", "failed to write tenant deletion mark", "user", userID, "err", err) @@ -72,7 +72,7 @@ type DeleteTenantStatusResponse struct { func (api *TenantDeletionAPI) DeleteTenantStatus(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/pkg/purger/tenant_deletion_api_test.go b/pkg/purger/tenant_deletion_api_test.go index 6667438b1b8..4b3cdd61c96 100644 --- a/pkg/purger/tenant_deletion_api_test.go +++ b/pkg/purger/tenant_deletion_api_test.go @@ -12,7 +12,7 @@ import ( "github.com/thanos-io/objstore" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestDeleteTenant(t *testing.T) { @@ -34,7 +34,7 @@ func TestDeleteTenant(t *testing.T) { api.DeleteTenant(resp, req.WithContext(ctx)) require.Equal(t, http.StatusOK, resp.Code) - exists, err := tsdb.TenantDeletionMarkExists(ctx, bkt, "fake") + exists, err := users.TenantDeletionMarkExists(ctx, bkt, "fake") require.NoError(t, err) require.True(t, exists) } diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index d5404dbc8b3..22c09e1f55f 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -14,12 +14,10 @@ import ( "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/storage/bucket" - - 
"github.com/cortexproject/cortex/pkg/util/validation" - "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util/services" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/validation" ) func TestBucketIndexBlocksFinder_GetBlocks(t *testing.T) { diff --git a/pkg/querier/blocks_finder_bucket_scan.go b/pkg/querier/blocks_finder_bucket_scan.go index aef1543cc9e..976b1a7aefa 100644 --- a/pkg/querier/blocks_finder_bucket_scan.go +++ b/pkg/querier/blocks_finder_bucket_scan.go @@ -25,12 +25,12 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/backoff" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( diff --git a/pkg/querier/blocks_finder_bucket_scan_test.go b/pkg/querier/blocks_finder_bucket_scan_test.go index 9313afffdf4..929a0900ca7 100644 --- a/pkg/querier/blocks_finder_bucket_scan_test.go +++ b/pkg/querier/blocks_finder_bucket_scan_test.go @@ -22,9 +22,9 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util/services" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" + 
"github.com/cortexproject/cortex/pkg/util/users" ) func TestBucketScanBlocksFinder_InitialScan(t *testing.T) { @@ -87,8 +87,8 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { cfg := prepareBucketScanBlocksFinderConfig() cfg.CacheDir = t.TempDir() - usersScanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + usersScanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, MaxStalePeriod: time.Hour, CacheTTL: 0, }, bucket, log.NewNopLogger(), reg) @@ -103,8 +103,8 @@ func TestBucketScanBlocksFinder_InitialScanFailure(t *testing.T) { bucket.MockIter("", []string{"user-1"}, nil) bucket.MockIter("__markers__", []string{}, nil) bucket.MockIter("user-1/", []string{"user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json"}, nil) - bucket.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucket.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + bucket.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucket.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) bucket.MockGet("user-1/01DTVP434PA9VFXSW2JKB3392D/meta.json", "invalid", errors.New("mocked error")) require.NoError(t, s.StartAsync(ctx)) @@ -152,8 +152,8 @@ func TestBucketScanBlocksFinder_StopWhileRunningTheInitialScanOnManyTenants(t *t bucket.MockIterWithCallback(tenantID+"/", []string{}, nil, func() { time.Sleep(time.Second) }) - bucket.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(tenantID), false, nil) - bucket.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(tenantID), false, nil) + bucket.MockExists(users.GetGlobalDeletionMarkPath(tenantID), false, nil) + bucket.MockExists(users.GetLocalDeletionMarkPath(tenantID), false, nil) } cfg := prepareBucketScanBlocksFinderConfig() @@ -162,8 +162,8 @@ func TestBucketScanBlocksFinder_StopWhileRunningTheInitialScanOnManyTenants(t *t cfg.TenantsConcurrency = 1 reg := prometheus.NewRegistry() 
- usersScanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + usersScanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, MaxStalePeriod: time.Hour, CacheTTL: 0, }, bucket, log.NewNopLogger(), reg) @@ -206,8 +206,8 @@ func TestBucketScanBlocksFinder_StopWhileRunningTheInitialScanOnManyBlocks(t *te cfg.TenantsConcurrency = 1 reg := prometheus.NewRegistry() - usersScanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + usersScanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, MaxStalePeriod: time.Hour, CacheTTL: 0, }, bucket, log.NewNopLogger(), reg) @@ -526,8 +526,8 @@ func prepareBucketScanBlocksFinder(t *testing.T, cfg BucketScanBlocksFinderConfi cfg.CacheDir = t.TempDir() // Create a user scanner with list strategy - usersScanner, err := users.NewScanner(cortex_tsdb.UsersScannerConfig{ - Strategy: cortex_tsdb.UserScanStrategyList, + usersScanner, err := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, MaxStalePeriod: time.Hour, CacheTTL: 0, }, bkt, log.NewNopLogger(), reg) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 8c7ae1f41f4..3f4e335f0c2 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -44,16 +44,15 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" util_log 
"github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/multierror" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -338,7 +337,7 @@ func (q *blocksStoreQuerier) Select(ctx context.Context, _ bool, sp *storage.Sel } func (q *blocksStoreQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, nil, err } @@ -381,7 +380,7 @@ func (q *blocksStoreQuerier) LabelNames(ctx context.Context, hints *storage.Labe } func (q *blocksStoreQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, nil, err } @@ -428,7 +427,7 @@ func (q *blocksStoreQuerier) Close() error { } func (q *blocksStoreQuerier) selectSorted(ctx context.Context, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return storage.ErrSeriesSet(err) } diff --git a/pkg/querier/distributor_queryable.go b/pkg/querier/distributor_queryable.go index 025f7e1ecf1..f57f32bda55 100644 --- a/pkg/querier/distributor_queryable.go +++ b/pkg/querier/distributor_queryable.go @@ -18,11 +18,11 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier/partialdata" "github.com/cortexproject/cortex/pkg/querier/series" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/backoff" 
"github.com/cortexproject/cortex/pkg/util/chunkcompat" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" ) const retryMinBackoff = time.Millisecond @@ -361,7 +361,7 @@ func (q *distributorQuerier) Close() error { } func (q *distributorQuerier) partialDataEnabled(ctx context.Context) bool { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return false } diff --git a/pkg/querier/parquet_queryable.go b/pkg/querier/parquet_queryable.go index 502a635534b..0ea06056b7d 100644 --- a/pkg/querier/parquet_queryable.go +++ b/pkg/querier/parquet_queryable.go @@ -30,11 +30,11 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/multierror" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -143,19 +143,19 @@ func NewParquetQueryable( queryable.WithRowCountLimitFunc(func(ctx context.Context) int64 { // Ignore error as this shouldn't happen. // If failed to resolve tenant we will just use the default limit value. - userID, _ := tenant.TenantID(ctx) + userID, _ := users.TenantID(ctx) return int64(limits.ParquetMaxFetchedRowCount(userID)) }), queryable.WithChunkBytesLimitFunc(func(ctx context.Context) int64 { // Ignore error as this shouldn't happen. // If failed to resolve tenant we will just use the default limit value. - userID, _ := tenant.TenantID(ctx) + userID, _ := users.TenantID(ctx) return int64(limits.ParquetMaxFetchedChunkBytes(userID)) }), queryable.WithDataBytesLimitFunc(func(ctx context.Context) int64 { // Ignore error as this shouldn't happen. 
// If failed to resolve tenant we will just use the default limit value. - userID, _ := tenant.TenantID(ctx) + userID, _ := users.TenantID(ctx) return int64(limits.ParquetMaxFetchedDataBytes(userID)) }), queryable.WithMaterializedLabelsFilterCallback(materializedLabelsFilterCallback), @@ -193,7 +193,7 @@ func NewParquetQueryable( }), } parquetQueryable, err := queryable.NewParquetQueryable(cDecoder, func(ctx context.Context, mint, maxt int64) ([]parquet_storage.ParquetShard, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, err } @@ -543,7 +543,7 @@ func (q *parquetQuerierWithFallback) Close() error { } func (q *parquetQuerierWithFallback) getBlocks(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher) ([]*bucketindex.Block, []*bucketindex.Block, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, nil, err } diff --git a/pkg/querier/parquet_queryable_test.go b/pkg/querier/parquet_queryable_test.go index e842a69dda8..c570f9662cc 100644 --- a/pkg/querier/parquet_queryable_test.go +++ b/pkg/querier/parquet_queryable_test.go @@ -35,11 +35,11 @@ import ( "github.com/cortexproject/cortex/pkg/storage/parquet" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" "github.com/cortexproject/cortex/pkg/util/validation" ) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index f84f07b9674..334b6dcc685 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -28,12 +28,12 @@ import ( 
"github.com/cortexproject/cortex/pkg/querier/lazyquery" "github.com/cortexproject/cortex/pkg/querier/partialdata" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -325,7 +325,7 @@ type querier struct { func (q querier) setupFromCtx(ctx context.Context) (context.Context, *querier_stats.QueryStats, string, int64, int64, storage.Querier, []storage.Querier, error) { stats := querier_stats.FromContext(ctx) - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return ctx, stats, userID, 0, 0, nil, nil, err } diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable.go b/pkg/querier/tenantfederation/exemplar_merge_queryable.go index 33e16ba276a..286c67f363e 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable.go @@ -12,9 +12,9 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" ) // NewExemplarQueryable returns a exemplarQueryable that iterates through all the @@ -35,7 +35,7 @@ func NewExemplarQueryable(upstream storage.ExemplarQueryable, maxConcurrent int, func tenantExemplarQuerierCallback(exemplarQueryable storage.ExemplarQueryable) MergeExemplarQuerierCallback { return func(ctx context.Context) ([]string, []storage.ExemplarQuerier, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + 
tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, nil, err } diff --git a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go index b52bf1b0828..994d51e51ea 100644 --- a/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go +++ b/pkg/querier/tenantfederation/exemplar_merge_queryable_test.go @@ -18,10 +18,9 @@ import ( "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -62,7 +61,7 @@ type mockExemplarQueryable struct { func (m *mockExemplarQueryable) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) { // Due to lint check for `ensure the query path is supporting multiple tenants` - ids, err := tenant.TenantIDs(ctx) + ids, err := users.TenantIDs(ctx) if err != nil { return nil, err } @@ -124,7 +123,7 @@ func getFixtureExemplarResult2() []exemplar.QueryResult { func Test_MergeExemplarQuerier_Select(t *testing.T) { // set a multi tenant resolver - tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) tests := []struct { name string @@ -325,20 +324,20 @@ func Test_MergeExemplarQuerier_Select_WhenUseRegexResolver(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - 
bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers diff --git a/pkg/querier/tenantfederation/merge_queryable.go b/pkg/querier/tenantfederation/merge_queryable.go index 3a69a6cc8da..12359990c72 100644 --- a/pkg/querier/tenantfederation/merge_queryable.go +++ b/pkg/querier/tenantfederation/merge_queryable.go @@ -15,9 +15,9 @@ import ( "github.com/prometheus/prometheus/util/annotations" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -44,7 +44,7 @@ func NewQueryable(upstream storage.Queryable, maxConcurrent int, byPassWithSingl func tenantQuerierCallback(queryable storage.Queryable) MergeQuerierCallback { return func(ctx context.Context, mint int64, maxt int64) ([]string, []storage.Querier, error) { - tenantIDs, 
err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, nil, err } diff --git a/pkg/querier/tenantfederation/merge_queryable_test.go b/pkg/querier/tenantfederation/merge_queryable_test.go index df1ed124683..e06d86909c6 100644 --- a/pkg/querier/tenantfederation/merge_queryable_test.go +++ b/pkg/querier/tenantfederation/merge_queryable_test.go @@ -26,11 +26,10 @@ import ( "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -46,13 +45,13 @@ const ( seriesWithLabelNames = "series_with_label_names" ) -// mockTenantQueryableWithFilter is a storage.Queryable that can be use to return specific warnings or errors by tenant. +// mockTenantQueryableWithFilter is a storage.Queryable that can be used to return specific warnings or errors by user. type mockTenantQueryableWithFilter struct { // extraLabels are labels added to all series for all tenants. extraLabels []string - // warningsByTenant are warnings that will be returned for queries of that tenant. + // warningsByTenant are warnings that will be returned for queries of that user. warningsByTenant map[string]annotations.Annotations - // queryErrByTenant is an error that will be returne for queries of that tenant. + // queryErrByTenant is an error that will be returned for queries of that user. queryErrByTenant map[string]error } @@ -80,9 +79,9 @@ type mockTenantQuerier struct { warnings annotations.Annotations queryErr error - // warningsByTenant are warnings that will be returned for queries of that tenant. + // warningsByTenant are warnings that will be returned for queries of that user.
warningsByTenant map[string]annotations.Annotations - // queryErrByTenant is an error that will be returne for queries of that tenant. + // queryErrByTenant is an error that will be returned for queries of that user. queryErrByTenant map[string]error } @@ -155,7 +154,7 @@ func (m *mockSeriesSet) Warnings() annotations.Annotations { // Select implements the storage.Querier interface. func (m mockTenantQuerier) Select(ctx context.Context, _ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return storage.ErrSeriesSet(err) } @@ -194,7 +193,7 @@ func (m mockTenantQuerier) Select(ctx context.Context, _ bool, sp *storage.Selec // LabelValues implements the storage.LabelQuerier interface. // The mockTenantQuerier returns all a sorted slice of all label values and does not support reducing the result set with matchers. func (m mockTenantQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, nil, err } @@ -242,7 +241,7 @@ func (m mockTenantQuerier) LabelValues(ctx context.Context, name string, hints * // If only one matcher is provided with label Name=seriesWithLabelNames then the resulting set will have the values of that matchers pipe-split appended. // I.e.
querying for {seriesWithLabelNames="foo|bar|baz"} will have as result [bar, baz, foo, ] func (m mockTenantQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, nil, err } @@ -665,22 +664,22 @@ func TestMergeQueryable_Select(t *testing.T) { bucketClient.MockIter("", scenario.tenants, nil) bucketClient.MockIter("__markers__", []string{}, nil) - for _, tenant := range scenario.tenants { - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(tenant), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(tenant), false, nil) + for _, scenarioTenant := range scenario.tenants { + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(scenarioTenant), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(scenarioTenant), false, nil) } bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) // set a regex tenant resolver - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers @@ -691,7 +690,7 @@ func TestMergeQueryable_Select(t *testing.T) { ctx = user.InjectOrgID(ctx, "team-.+") } else { // Set a multi tenant resolver. 
- tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) // inject tenants into context if len(scenario.tenants) > 0 { @@ -864,21 +863,21 @@ func TestMergeQueryable_LabelNames(t *testing.T) { bucketClient.MockIter("", scenario.tenants, nil) bucketClient.MockIter("__markers__", []string{}, nil) - for _, tenant := range scenario.tenants { - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(tenant), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(tenant), false, nil) + for _, scenarioTenant := range scenario.tenants { + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(scenarioTenant), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(scenarioTenant), false, nil) } bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) // set a regex tenant resolver - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers @@ -889,7 +888,7 @@ func TestMergeQueryable_LabelNames(t *testing.T) { ctx = user.InjectOrgID(ctx, "team-.+") } else { // Set a multi tenant resolver. 
- tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) // inject tenants into context if len(scenario.tenants) > 0 { @@ -1101,21 +1100,21 @@ func TestMergeQueryable_LabelValues(t *testing.T) { bucketClient.MockIter("", scenario.tenants, nil) bucketClient.MockIter("__markers__", []string{}, nil) - for _, tenant := range scenario.tenants { - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(tenant), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(tenant), false, nil) + for _, scenarioTenant := range scenario.tenants { + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(scenarioTenant), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(scenarioTenant), false, nil) } bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) // set a regex tenant resolver - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers @@ -1126,7 +1125,7 @@ func TestMergeQueryable_LabelValues(t *testing.T) { ctx = user.InjectOrgID(ctx, "team-.+") } else { // Set a multi tenant resolver. 
- tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) // inject tenants into context if len(scenario.tenants) > 0 { @@ -1210,7 +1209,7 @@ func TestTracingMergeQueryable(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "team-a|team-b") // set a multi tenant resolver - tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) filter := mockTenantQueryableWithFilter{} q := NewQueryable(&filter, defaultMaxConcurrency, false, nil) // retrieve querier if set diff --git a/pkg/querier/tenantfederation/metadata_merge_querier.go b/pkg/querier/tenantfederation/metadata_merge_querier.go index 37e5a63f5e1..7c61d587680 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier.go @@ -12,9 +12,9 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" ) // NewMetadataQuerier returns a MetadataQuerier that merges metric @@ -50,7 +50,7 @@ func (m *mergeMetadataQuerier) MetricsMetadata(ctx context.Context, req *client. 
log, ctx := spanlogger.New(ctx, "mergeMetadataQuerier.MetricsMetadata") defer log.Finish() - tenantIds, err := tenant.TenantIDs(ctx) + tenantIds, err := users.TenantIDs(ctx) if err != nil { return nil, err } diff --git a/pkg/querier/tenantfederation/metadata_merge_querier_test.go b/pkg/querier/tenantfederation/metadata_merge_querier_test.go index c04e4e3c0b9..fb4122f782a 100644 --- a/pkg/querier/tenantfederation/metadata_merge_querier_test.go +++ b/pkg/querier/tenantfederation/metadata_merge_querier_test.go @@ -17,10 +17,9 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -61,7 +60,7 @@ type mockMetadataQuerier struct { func (m *mockMetadataQuerier) MetricsMetadata(ctx context.Context, _ *client.MetricsMetadataRequest) ([]scrape.MetricMetadata, error) { // Due to lint check for `ensure the query path is supporting multiple tenants` - ids, err := tenant.TenantIDs(ctx) + ids, err := users.TenantIDs(ctx) if err != nil { return nil, err } @@ -76,7 +75,7 @@ func (m *mockMetadataQuerier) MetricsMetadata(ctx context.Context, _ *client.Met func Test_mergeMetadataQuerier_MetricsMetadata(t *testing.T) { // set a multi tenant resolver - tenant.WithDefaultResolver(tenant.NewMultiResolver()) + users.WithDefaultResolver(users.NewMultiResolver()) tests := []struct { name string @@ -159,20 +158,20 @@ func Test_mergeMetadataQuerier_MetricsMetadata_WhenUseRegexResolver(t *testing.T bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{"user-1", "user-2"}, nil) bucketClient.MockIter("__markers__", []string{}, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - 
bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-1"), false, nil) + bucketClient.MockExists(users.GetGlobalDeletionMarkPath("user-2"), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath("user-2"), false, nil) bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) - tenant.WithDefaultResolver(regexResolver) + users.WithDefaultResolver(regexResolver) require.NoError(t, services.StartAndAwaitRunning(context.Background(), regexResolver)) // wait update knownUsers diff --git a/pkg/querier/tenantfederation/regex_resolver.go b/pkg/querier/tenantfederation/regex_resolver.go index 274bb4f1d98..cab49303f5b 100644 --- a/pkg/querier/tenantfederation/regex_resolver.go +++ b/pkg/querier/tenantfederation/regex_resolver.go @@ -19,10 +19,8 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -48,7 +46,7 @@ type RegexResolver struct { discoveredUsers 
prometheus.Gauge } -func NewRegexResolver(cfg tsdb.UsersScannerConfig, tenantFederationCfg Config, reg prometheus.Registerer, bucketClientFactory func(ctx context.Context) (objstore.InstrumentedBucket, error), logger log.Logger) (*RegexResolver, error) { +func NewRegexResolver(cfg users.UsersScannerConfig, tenantFederationCfg Config, reg prometheus.Registerer, bucketClientFactory func(ctx context.Context) (objstore.InstrumentedBucket, error), logger log.Logger) (*RegexResolver, error) { bucketClient, err := bucketClientFactory(context.Background()) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") @@ -133,7 +131,7 @@ func (r *RegexResolver) TenantIDs(ctx context.Context) ([]string, error) { return nil, err } - return tenant.ValidateOrgIDs(orgIDs) + return users.ValidateOrgIDs(orgIDs) } func (r *RegexResolver) getRegexMatchedOrgIds(orgID string) ([]string, error) { @@ -154,7 +152,7 @@ func (r *RegexResolver) getRegexMatchedOrgIds(orgID string) ([]string, error) { } if len(matched) == 0 { - if err := tenant.ValidTenantID(orgID); err == nil { + if err := users.ValidTenantID(orgID); err == nil { // when querying for a newly created orgID, the query may not // work because it has not been uploaded to object storage. 
// To make the query work (not breaking existing behavior), @@ -195,11 +193,11 @@ func (r *RegexValidator) TenantID(ctx context.Context) (string, error) { return "", errInvalidRegex } - if err := tenant.CheckTenantIDLength(id); err != nil { + if err := users.CheckTenantIDLength(id); err != nil { return "", err } - if err := tenant.CheckTenantIDIsSupported(id); err != nil { + if err := users.CheckTenantIDIsSupported(id); err != nil { return "", err } diff --git a/pkg/querier/tenantfederation/regex_resolver_test.go b/pkg/querier/tenantfederation/regex_resolver_test.go index 03735e8b1cd..8e9d274979b 100644 --- a/pkg/querier/tenantfederation/regex_resolver_test.go +++ b/pkg/querier/tenantfederation/regex_resolver_test.go @@ -15,9 +15,9 @@ import ( "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" ) func Test_RegexResolver(t *testing.T) { @@ -80,16 +80,16 @@ func Test_RegexResolver(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", tc.existingTenants, nil) bucketClient.MockIter("__markers__", []string{}, nil) - for _, tenant := range tc.existingTenants { - bucketClient.MockExists(cortex_tsdb.GetGlobalDeletionMarkPath(tenant), false, nil) - bucketClient.MockExists(cortex_tsdb.GetLocalDeletionMarkPath(tenant), false, nil) + for _, existingTenant := range tc.existingTenants { + bucketClient.MockExists(users.GetGlobalDeletionMarkPath(existingTenant), false, nil) + bucketClient.MockExists(users.GetLocalDeletionMarkPath(existingTenant), false, nil) } bucketClientFactory := func(ctx context.Context) (objstore.InstrumentedBucket, error) { return bucketClient, nil } - usersScannerConfig := cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList} + usersScannerConfig := 
users.UsersScannerConfig{Strategy: users.UserScanStrategyList} tenantFederationConfig := Config{UserSyncInterval: time.Second, MaxTenant: tc.maxTenants} regexResolver, err := NewRegexResolver(usersScannerConfig, tenantFederationConfig, reg, bucketClientFactory, log.NewNopLogger()) require.NoError(t, err) diff --git a/pkg/querier/tripperware/instantquery/limits.go b/pkg/querier/tripperware/instantquery/limits.go index 477fe4c36f4..98f7e48d89e 100644 --- a/pkg/querier/tripperware/instantquery/limits.go +++ b/pkg/querier/tripperware/instantquery/limits.go @@ -9,9 +9,9 @@ import ( cortexparser "github.com/cortexproject/cortex/pkg/parser" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/promql" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -38,7 +38,7 @@ func (l limitsMiddleware) Do(ctx context.Context, r tripperware.Request) (trippe log, ctx := spanlogger.New(ctx, "limits") defer log.Finish() - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } diff --git a/pkg/querier/tripperware/queryrange/limits.go b/pkg/querier/tripperware/queryrange/limits.go index 7b1f17b55a9..77be7f332f1 100644 --- a/pkg/querier/tripperware/queryrange/limits.go +++ b/pkg/querier/tripperware/queryrange/limits.go @@ -11,10 +11,10 @@ import ( cortexparser "github.com/cortexproject/cortex/pkg/parser" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/promql" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -41,7 +41,7 @@ 
func (l limitsMiddleware) Do(ctx context.Context, r tripperware.Request) (trippe log, ctx := spanlogger.New(ctx, "limits") defer log.Finish() - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } diff --git a/pkg/querier/tripperware/queryrange/results_cache.go b/pkg/querier/tripperware/queryrange/results_cache.go index 96f24516bee..204ac7fead0 100644 --- a/pkg/querier/tripperware/queryrange/results_cache.go +++ b/pkg/querier/tripperware/queryrange/results_cache.go @@ -32,10 +32,10 @@ import ( "github.com/cortexproject/cortex/pkg/querier/partialdata" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -222,7 +222,7 @@ func NewResultsCacheMiddleware( } func (s resultsCache) Do(ctx context.Context, r tripperware.Request) (tripperware.Response, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) respWithStats := r.GetStats() != "" && s.cacheQueryableSamplesStats if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) @@ -240,7 +240,7 @@ func (s resultsCache) Do(ctx context.Context, r tripperware.Request) (tripperwar return s.next.Do(ctx, r) } - key := s.splitter.GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), r) + key := s.splitter.GenerateCacheKey(ctx, users.JoinTenantIDs(tenantIDs), r) var ( extents []tripperware.Extent diff --git a/pkg/querier/tripperware/queryrange/results_cache_test.go b/pkg/querier/tripperware/queryrange/results_cache_test.go index 05e968fb6ec..6e5b95fdf5c 100644 --- 
a/pkg/querier/tripperware/queryrange/results_cache_test.go +++ b/pkg/querier/tripperware/queryrange/results_cache_test.go @@ -21,8 +21,8 @@ import ( "github.com/cortexproject/cortex/pkg/querier/partialdata" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -1578,9 +1578,9 @@ func TestResultsCacheFillCompatibility(t *testing.T) { require.NoError(t, err) // Check cache and make sure we write response in old format even though the response is new format. - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) require.NoError(t, err) - key := splitter(day).GenerateCacheKey(ctx, tenant.JoinTenantIDs(tenantIDs), parsedRequest) + key := splitter(day).GenerateCacheKey(ctx, users.JoinTenantIDs(tenantIDs), parsedRequest) cacheKey := cache.HashKey(key) found, bufs, _ := c.Fetch(ctx, []string{cacheKey}) diff --git a/pkg/querier/tripperware/queryrange/split_by_interval.go b/pkg/querier/tripperware/queryrange/split_by_interval.go index 980d2867a87..52198e7abc9 100644 --- a/pkg/querier/tripperware/queryrange/split_by_interval.go +++ b/pkg/querier/tripperware/queryrange/split_by_interval.go @@ -14,8 +14,8 @@ import ( cortexparser "github.com/cortexproject/cortex/pkg/parser" querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/querier/tripperware" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -220,7 +220,7 @@ func dynamicIntervalFn(cfg Config, limits tripperware.Limits, queryAnalyzer quer } func getMaxVerticalShardSize(ctx context.Context, r tripperware.Request, limits tripperware.Limits, queryAnalyzer 
querysharding.Analyzer) (int, bool, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return 1, false, err } diff --git a/pkg/querier/tripperware/roundtrip.go b/pkg/querier/tripperware/roundtrip.go index 2ff717bae84..98366cfc6b1 100644 --- a/pkg/querier/tripperware/roundtrip.go +++ b/pkg/querier/tripperware/roundtrip.go @@ -31,10 +31,10 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/requestmeta" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -131,7 +131,7 @@ func NewQueryTripperware( Help: "Total rejected queries per tenant.", }, []string{"op", "user"}) - activeUsers := util.NewActiveUsersCleanupWithDefaultValues(func(user string) { + activeUsers := users.NewActiveUsersCleanupWithDefaultValues(func(user string) { err := util.DeleteMatchingLabels(queriesPerTenant, map[string]string{"user": user}) if err != nil { level.Warn(log).Log("msg", "failed to remove cortex_query_frontend_queries_total metric for user", "user", user) @@ -179,13 +179,13 @@ func NewQueryTripperware( op = opTypeParseQuery } - tenantIDs, err := tenant.TenantIDs(r.Context()) + tenantIDs, err := users.TenantIDs(r.Context()) // This should never happen anyways because we have auth middleware before this. 
if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } now := time.Now() - userStr := tenant.JoinTenantIDs(tenantIDs) + userStr := users.JoinTenantIDs(tenantIDs) activeUsers.UpdateUserTimestamp(userStr, now) source := GetSource(r) queriesPerTenant.WithLabelValues(op, source, userStr).Inc() diff --git a/pkg/querier/tripperware/shard_by.go b/pkg/querier/tripperware/shard_by.go index 9053e522e2b..1d68ec24d6a 100644 --- a/pkg/querier/tripperware/shard_by.go +++ b/pkg/querier/tripperware/shard_by.go @@ -12,8 +12,8 @@ import ( querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" cquerysharding "github.com/cortexproject/cortex/pkg/querysharding" - "github.com/cortexproject/cortex/pkg/tenant" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -38,7 +38,7 @@ type shardBy struct { } func (s shardBy) Do(ctx context.Context, r Request) (Response, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) stats := querier_stats.FromContext(ctx) if err != nil { diff --git a/pkg/querier/tripperware/util.go b/pkg/querier/tripperware/util.go index 90f2224c115..57ea0279ac9 100644 --- a/pkg/querier/tripperware/util.go +++ b/pkg/querier/tripperware/util.go @@ -6,8 +6,9 @@ import ( "github.com/weaveworks/common/httpgrpc" + "github.com/cortexproject/cortex/pkg/util/users" + "github.com/cortexproject/cortex/pkg/querier/stats" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -19,7 +20,7 @@ type RequestResponse struct { // DoRequests executes a list of requests in parallel. The limits parameters is used to limit parallelism per single request. 
func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) { - tenantIDs, err := tenant.TenantIDs(ctx) + tenantIDs, err := users.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go index 59e3303127e..86dafc528ed 100644 --- a/pkg/ruler/api.go +++ b/pkg/ruler/api.go @@ -24,9 +24,9 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/tenant" util_api "github.com/cortexproject/cortex/pkg/util/api" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" ) // In order to reimplement the prometheus rules API, a large amount of code was copied over @@ -122,7 +122,7 @@ func NewAPI(r *Ruler, s rulestore.RuleStore, logger log.Logger) *API { func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { logger := util_log.WithContext(req.Context(), a.logger) - userID, err := tenant.TenantID(req.Context()) + userID, err := users.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) util_api.RespondError(logger, w, v1.ErrBadData, "no valid org id found", http.StatusBadRequest) @@ -334,7 +334,7 @@ func parseExcludeAlerts(r *http.Request) (bool, error) { func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) { logger := util_log.WithContext(req.Context(), a.logger) - userID, err := tenant.TenantID(req.Context()) + userID, err := users.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) util_api.RespondError(logger, w, v1.ErrBadData, "no valid org id found", http.StatusBadRequest) @@ -473,7 +473,7 @@ func parseGroupName(params 
map[string]string) (string, error) { // and returns them in that order. It also allows users to require a namespace or group name and return // an error if it they can not be parsed. func parseRequest(req *http.Request, requireNamespace, requireGroup bool) (string, string, string, error) { - userID, err := tenant.TenantID(req.Context()) + userID, err := users.TenantID(req.Context()) if err != nil { return "", "", "", user.ErrNoOrgID } diff --git a/pkg/ruler/manager_metrics.go b/pkg/ruler/manager_metrics.go index 93acdc26b17..d6a3dd52c2d 100644 --- a/pkg/ruler/manager_metrics.go +++ b/pkg/ruler/manager_metrics.go @@ -5,6 +5,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/users" ) // ManagerMetrics aggregates metrics exported by the Prometheus @@ -307,10 +308,10 @@ func (m *RuleEvalMetrics) deletePerUserMetrics(userID string) { type RuleGroupMetrics struct { RuleGroupsInStore *prometheus.GaugeVec tenants map[string]struct{} - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants } -func NewRuleGroupMetrics(reg prometheus.Registerer, allowedTenants *util.AllowedTenants) *RuleGroupMetrics { +func NewRuleGroupMetrics(reg prometheus.Registerer, allowedTenants *users.AllowedTenants) *RuleGroupMetrics { m := &RuleGroupMetrics{ RuleGroupsInStore: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "cortex_ruler_rule_groups_in_store", diff --git a/pkg/ruler/manager_metrics_test.go b/pkg/ruler/manager_metrics_test.go index dfc9800ad52..1e6f7c158a3 100644 --- a/pkg/ruler/manager_metrics_test.go +++ b/pkg/ruler/manager_metrics_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestManagerMetricsWithRuleGroupLabel(t *testing.T) { @@ -606,7 +607,7 @@ func TestRuleEvalMetricsDeletePerUserMetrics(t *testing.T) { func 
TestRuleGroupMetrics(t *testing.T) { reg := prometheus.NewPedanticRegistry() - m := NewRuleGroupMetrics(reg, util.NewAllowedTenants(nil, []string{"fake3"})) + m := NewRuleGroupMetrics(reg, users.NewAllowedTenants(nil, []string{"fake3"})) m.UpdateRuleGroupsInStore(map[string]int{ "fake1": 10, "fake2": 20, diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 9b03acae15e..a2826cd2f84 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -36,7 +36,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_api "github.com/cortexproject/cortex/pkg/util/api" "github.com/cortexproject/cortex/pkg/util/concurrency" @@ -44,6 +43,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/grpcclient" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -341,10 +341,12 @@ type Ruler struct { rulerGetRulesFailures *prometheus.CounterVec ruleGroupMetrics *RuleGroupMetrics - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants registry prometheus.Registerer logger log.Logger + + userIndexUpdater *users.UserIndexUpdater } // NewRuler creates a new ruler from a distributor and chunk store. 
@@ -354,14 +356,15 @@ func NewRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, func newRuler(cfg Config, manager MultiTenantManager, reg prometheus.Registerer, logger log.Logger, ruleStore rulestore.RuleStore, limits RulesLimits, clientPool ClientsPool) (*Ruler, error) { ruler := &Ruler{ - cfg: cfg, - store: ruleStore, - manager: manager, - registry: reg, - logger: logger, - limits: limits, - clientsPool: clientPool, - allowedTenants: util.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), + cfg: cfg, + userIndexUpdater: ruleStore.GetUserIndexUpdater(), + store: ruleStore, + manager: manager, + registry: reg, + logger: logger, + limits: limits, + clientsPool: clientPool, + allowedTenants: users.NewAllowedTenants(cfg.EnabledTenants, cfg.DisabledTenants), ringCheckErrors: promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "cortex_ruler_ring_check_errors_total", @@ -694,6 +697,10 @@ func (r *Ruler) run(ctx context.Context) error { ringTickerChan = ringTicker.C } + if r.cfg.EnableSharding && r.userIndexUpdater != nil { + go r.userIndexUpdateLoop(ctx) + } + syncRuleErrMsg := func(syncRulesErr error) { level.Error(r.logger).Log("msg", "failed to sync rules", "err", syncRulesErr) } @@ -727,6 +734,48 @@ func (r *Ruler) run(ctx context.Context) error { } } +func (r *Ruler) userIndexUpdateLoop(ctx context.Context) { + // Hardcode ID to check which ruler owns updating user index. + userID := users.UserIndexCompressedFilename + // Align with clean up interval. 
+ ticker := time.NewTicker(util.DurationWithJitter(r.store.GetUserIndexUpdater().GetCleanUpInterval(), 0.1)) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + level.Error(r.logger).Log("msg", "context timeout, exit user index update loop", "err", ctx.Err()) + return + case <-ticker.C: + owned := r.isUserOwned(userID) + if !owned { + continue + } + if err := r.userIndexUpdater.UpdateUserIndex(ctx); err != nil { + level.Error(r.logger).Log("msg", "failed to update user index", "err", err) + // Wait for next interval. Worst case, the user index scanner will fall back to the list strategy. + continue + } + } + } +} + +func (r *Ruler) isUserOwned(userID string) bool { + // If sharding is disabled, any ruler instance owns all users. + if !r.cfg.EnableSharding { + return true + } + + rulers, err := r.ring.Get(users.ShardByUser(userID), RingOp, nil, nil, nil) + if err != nil { + r.ringCheckErrors.Inc() + level.Error(r.logger).Log("msg", "failed to get rulers from ring", "user", userID, "err", err) + return false + } + + return rulers.Includes(r.lifecycler.GetInstanceAddr()) +} + func (r *Ruler) syncRules(ctx context.Context, reason string) error { level.Info(r.logger).Log("msg", "syncing rules", "reason", reason) r.rulerSync.WithLabelValues(reason).Inc() @@ -1028,7 +1077,7 @@ func (r *Ruler) filterBackupRuleGroups(userID string, ruleGroups []*rulespb.Rule // GetRules retrieves the running rules from this ruler and all running rulers in the ring if // sharding is enabled func (r *Ruler) GetRules(ctx context.Context, rulesRequest RulesRequest) (*RulesResponse, error) { - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id found in context") } @@ -1448,7 +1497,7 @@ func (r *Ruler) getShardedRules(ctx context.Context, userID string, rulesRequest // Rules implements the rules service func (r *Ruler) Rules(ctx context.Context, in *RulesRequest) (*RulesResponse, error) { - userID, err :=
tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return nil, fmt.Errorf("no user id found in context") @@ -1502,7 +1551,7 @@ func (r *Ruler) AssertMaxRulesPerRuleGroup(userID string, rules int) error { func (r *Ruler) DeleteTenantConfiguration(w http.ResponseWriter, req *http.Request) { logger := util_log.WithContext(req.Context(), r.logger) - userID, err := tenant.TenantID(req.Context()) + userID, err := users.TenantID(req.Context()) if err != nil { // When Cortex is running, it uses Auth Middleware for checking X-Scope-OrgID and injecting tenant into context. // Auth Middleware sends http.StatusUnauthorized if X-Scope-OrgID is missing, so we do too here, for consistency. diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index 755eb49fa37..c2464f3a297 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -52,11 +52,11 @@ import ( "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" "github.com/cortexproject/cortex/pkg/ruler/rulestore/bucketclient" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -392,7 +392,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) { // We do expect 1 API call for the user create with the getOrCreateNotifier() wg.Add(1) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r) + userID, _, err := users.ExtractTenantIDFromHTTPRequest(r) assert.NoError(t, err) assert.Equal(t, userID, "1") wg.Done() @@ -2617,7 +2617,10 @@ func verifyExpectedDeletedRuleGroupsForUser(t *testing.T, r *Ruler, userID strin func setupRuleGroupsStore(t *testing.T, 
ruleGroups []ruleGroupKey) (*objstore.InMemBucket, rulestore.RuleStore) { bucketClient := objstore.NewInMemBucket() - rs := bucketclient.NewBucketRuleStore(bucketClient, nil, log.NewNopLogger()) + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + rs, err := bucketclient.NewBucketRuleStore(bucketClient, usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) // "upload" rule groups for _, key := range ruleGroups { diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client.go b/pkg/ruler/rulestore/bucketclient/bucket_client.go index 8515ad910a7..00128672be7 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client.go @@ -13,13 +13,16 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/objstore" + "github.com/thanos-io/thanos/pkg/extprom" "golang.org/x/sync/errgroup" "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/util/multierror" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -42,14 +45,40 @@ type BucketRuleStore struct { bucket objstore.Bucket cfgProvider bucket.TenantConfigProvider logger log.Logger + + usersScanner users.Scanner + userIndexUpdater *users.UserIndexUpdater } -func NewBucketRuleStore(bkt objstore.Bucket, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *BucketRuleStore { - return &BucketRuleStore{ - bucket: bucket.NewPrefixedBucketClient(bkt, rulesPrefix), - cfgProvider: cfgProvider, - logger: logger, +func NewBucketRuleStore(bkt objstore.Bucket, userScannerCfg users.UsersScannerConfig, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (*BucketRuleStore, error) { + 
rulesBucket := bucket.NewPrefixedBucketClient(bkt, rulesPrefix) + + regWithComponent := extprom.WrapRegistererWith(prometheus.Labels{"component": "ruler"}, reg) + usersScanner, err := users.NewScanner(userScannerCfg, rulesBucket, logger, regWithComponent) + if err != nil { + return nil, errors.Wrap(err, "unable to initialize ruler users scanner") } + + var userIndexUpdater *users.UserIndexUpdater + if userScannerCfg.Strategy == users.UserScanStrategyUserIndex { + // We hardcode strategy to be list so we can ignore the error. + baseScanner, _ := users.NewScanner(users.UsersScannerConfig{ + Strategy: users.UserScanStrategyList, + }, rulesBucket, logger, regWithComponent) + userIndexUpdater = users.NewUserIndexUpdater(rulesBucket, userScannerCfg.CleanUpInterval, baseScanner, regWithComponent) + } + + return &BucketRuleStore{ + bucket: rulesBucket, + cfgProvider: cfgProvider, + logger: logger, + usersScanner: usersScanner, + userIndexUpdater: userIndexUpdater, + }, nil +} + +func (b *BucketRuleStore) GetUserIndexUpdater() *users.UserIndexUpdater { + return b.userIndexUpdater } // getRuleGroup loads and return a rules group. If existing rule group is supplied, it is Reset and reused. If nil, new RuleGroupDesc is allocated. @@ -94,16 +123,14 @@ func (b *BucketRuleStore) getRuleGroup(ctx context.Context, userID, namespace, g // ListAllUsers implements rules.RuleStore. func (b *BucketRuleStore) ListAllUsers(ctx context.Context) ([]string, error) { - var users []string - err := b.bucket.Iter(ctx, "", func(user string) error { - users = append(users, strings.TrimSuffix(user, objstore.DirDelim)) - return nil - }) + active, deleting, _, err := b.usersScanner.ScanUsers(ctx) if err != nil { return nil, fmt.Errorf("unable to list users in rule store bucket: %w", err) } - - return users, nil + userIDs := make([]string, 0, len(active)+len(deleting)) + userIDs = append(userIDs, active...) + userIDs = append(userIDs, deleting...)
+ return userIDs, nil } // ListAllRuleGroups implements rules.RuleStore. @@ -112,6 +139,11 @@ func (b *BucketRuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rul // List rule groups for all tenants. err := b.bucket.Iter(ctx, "", func(key string) error { + if key == users.UserIndexCompressedFilename { + // skip user-index.json.gz + return nil + } + userID, namespace, group, err := parseRuleGroupObjectKeyWithUser(key) if err != nil { level.Warn(b.logger).Log("msg", "invalid rule group object key found while listing rule groups", "key", key, "err", err) diff --git a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go index 0afa4b155cb..343a3354d7f 100644 --- a/pkg/ruler/rulestore/bucketclient/bucket_client_test.go +++ b/pkg/ruler/rulestore/bucketclient/bucket_client_test.go @@ -10,6 +10,7 @@ import ( "github.com/go-kit/log" "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/rulefmt" "github.com/stretchr/testify/assert" @@ -19,7 +20,8 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" - "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" ) type testGroup struct { @@ -105,7 +107,10 @@ func TestListRules(t *testing.T) { func TestLoadPartialRules(t *testing.T) { bucketClient := objstore.NewInMemBucket() mockedBucketClient := &testutil.MockBucketFailure{Bucket: bucketClient, GetFailures: map[string]error{}} - bucketStore := NewBucketRuleStore(mockedBucketClient, nil, log.NewNopLogger()) + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() + bucketStore, err := NewBucketRuleStore(mockedBucketClient, 
usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) groups := []testGroup{ {user: "user1", namespace: "hello", ruleGroup: rulefmt.RuleGroup{Name: "second testGroup", Interval: model.Duration(2 * time.Minute)}}, @@ -263,7 +268,10 @@ func TestDelete(t *testing.T) { func runForEachRuleStore(t *testing.T, testFn func(t *testing.T, store rulestore.RuleStore, bucketClient any)) { bucketClient := objstore.NewInMemBucket() - bucketStore := NewBucketRuleStore(bucketClient, nil, log.NewNopLogger()) + reg := prometheus.NewPedanticRegistry() + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + bucketStore, err := NewBucketRuleStore(bucketClient, usersScannerConfig, nil, log.NewNopLogger(), reg) + assert.NoError(t, err) stores := map[string]struct { store rulestore.RuleStore @@ -426,8 +434,11 @@ func TestListAllRuleGroupsWithNoNamespaceOrGroup(t *testing.T) { "rules/user3/bnM=/Z3JvdXAx", // namespace "ns", group "group1" }, } + usersScannerConfig := users.UsersScannerConfig{Strategy: users.UserScanStrategyList} + reg := prometheus.NewPedanticRegistry() - s := NewBucketRuleStore(obj, nil, log.NewNopLogger()) + s, err := NewBucketRuleStore(obj, usersScannerConfig, nil, log.NewNopLogger(), reg) + require.NoError(t, err) out, err := s.ListAllRuleGroups(context.Background()) require.NoError(t, err) diff --git a/pkg/ruler/rulestore/config.go b/pkg/ruler/rulestore/config.go index ef5c855eede..f468bc5eb01 100644 --- a/pkg/ruler/rulestore/config.go +++ b/pkg/ruler/rulestore/config.go @@ -9,13 +9,15 @@ import ( "github.com/cortexproject/cortex/pkg/ruler/rulestore/local" "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/users" ) // Config configures a rule store. 
type Config struct { bucket.Config `yaml:",inline"` - ConfigDB client.Config `yaml:"configdb"` - Local local.Config `yaml:"local"` + ConfigDB client.Config `yaml:"configdb"` + Local local.Config `yaml:"local"` + UsersScanner users.UsersScannerConfig `yaml:"users_scanner"` } // RegisterFlags registers the backend storage config. @@ -26,6 +28,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ConfigDB.RegisterFlagsWithPrefix(prefix, f) cfg.Local.RegisterFlagsWithPrefix(prefix, f) cfg.RegisterFlagsWithPrefix(prefix, f) + cfg.UsersScanner.RegisterFlagsWithPrefix(prefix, f) } // IsDefaults returns true if the storage options have not been set. diff --git a/pkg/ruler/rulestore/configdb/store.go b/pkg/ruler/rulestore/configdb/store.go index f29b126116e..50f931f9279 100644 --- a/pkg/ruler/rulestore/configdb/store.go +++ b/pkg/ruler/rulestore/configdb/store.go @@ -7,6 +7,7 @@ import ( "github.com/cortexproject/cortex/pkg/configs/client" "github.com/cortexproject/cortex/pkg/configs/userconfig" "github.com/cortexproject/cortex/pkg/ruler/rulespb" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -134,3 +135,7 @@ func (c *ConfigRuleStore) DeleteRuleGroup(ctx context.Context, userID, namespace func (c *ConfigRuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { return errors.New("not implemented by the config service rule store") } + +func (c *ConfigRuleStore) GetUserIndexUpdater() *users.UserIndexUpdater { + return nil +} diff --git a/pkg/ruler/rulestore/local/local.go b/pkg/ruler/rulestore/local/local.go index 30722f0f4f3..88835fcb3ef 100644 --- a/pkg/ruler/rulestore/local/local.go +++ b/pkg/ruler/rulestore/local/local.go @@ -10,6 +10,7 @@ import ( promRules "github.com/prometheus/prometheus/rules" "github.com/cortexproject/cortex/pkg/ruler/rulespb" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -123,6 +124,10 @@ func (l *Client) DeleteRuleGroup(ctx context.Context, userID, namespace string, return 
errors.New("DeleteRuleGroup unsupported in rule local store") } +func (l *Client) GetUserIndexUpdater() *users.UserIndexUpdater { + return nil +} + // DeleteNamespace implements RulerStore func (l *Client) DeleteNamespace(ctx context.Context, userID, namespace string) error { return errors.New("DeleteNamespace unsupported in rule local store") } diff --git a/pkg/ruler/rulestore/store.go b/pkg/ruler/rulestore/store.go index e59557cb9eb..d2272d7e3fa 100644 --- a/pkg/ruler/rulestore/store.go +++ b/pkg/ruler/rulestore/store.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/cortexproject/cortex/pkg/ruler/rulespb" + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -47,4 +48,7 @@ type RuleStore interface { // DeleteNamespace lists rule groups for given user and namespace, and deletes all rule groups. // If namespace is empty, deletes all rule groups for user. DeleteNamespace(ctx context.Context, userID, namespace string) error + + // GetUserIndexUpdater is a getter for the UserIndexUpdater. + GetUserIndexUpdater() *users.UserIndexUpdater } diff --git a/pkg/ruler/storage.go b/pkg/ruler/storage.go index 1cfadb32d5d..498b32d71da 100644 --- a/pkg/ruler/storage.go +++ b/pkg/ruler/storage.go @@ -36,5 +36,5 @@ func NewRuleStore(ctx context.Context, cfg rulestore.Config, cfgProvider bucket.
return nil, err } - return bucketclient.NewBucketRuleStore(bucketClient, cfgProvider, logger), nil + return bucketclient.NewBucketRuleStore(bucketClient, cfg.UsersScanner, cfgProvider, logger, reg) } diff --git a/pkg/ruler/store_mock_test.go b/pkg/ruler/store_mock_test.go index 75f38432a40..c149aa926e2 100644 --- a/pkg/ruler/store_mock_test.go +++ b/pkg/ruler/store_mock_test.go @@ -9,6 +9,7 @@ import ( "github.com/cortexproject/cortex/pkg/ruler/rulespb" "github.com/cortexproject/cortex/pkg/ruler/rulestore" + "github.com/cortexproject/cortex/pkg/util/users" ) type mockRuleStore struct { @@ -333,6 +334,10 @@ func (m *mockRuleStore) DeleteRuleGroup(ctx context.Context, userID string, name return nil } +func (m *mockRuleStore) GetUserIndexUpdater() *users.UserIndexUpdater { + return nil +} + func (m *mockRuleStore) DeleteNamespace(ctx context.Context, userID, namespace string) error { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 8223884b26f..9829ad238e9 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -30,13 +30,12 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler/fragment_table" "github.com/cortexproject/cortex/pkg/scheduler/queue" "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/httpgrpcutil" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -57,7 +56,7 @@ type Scheduler struct { connectedFrontends map[string]*connectedFrontend requestQueue *queue.RequestQueue - activeUsers *util.ActiveUsersCleanupService + activeUsers *users.ActiveUsersCleanupService 
pendingRequestsMu sync.Mutex @@ -156,7 +155,7 @@ func NewScheduler(cfg Config, limits Limits, log log.Logger, registerer promethe Help: "Number of query-frontend worker clients currently connected to the query-scheduler.", }, s.getConnectedFrontendClientsMetric) - s.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(s.cleanupMetricsForInactiveUser) + s.activeUsers = users.NewActiveUsersCleanupWithDefaultValues(s.cleanupMetricsForInactiveUser) var err error s.subservices, err = services.NewManager(s.requestQueue, s.activeUsers) @@ -420,7 +419,7 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr req.ctxCancel = cancel // aggregate the max queriers limit in the case of a multi tenant query - tenantIDs, err := tenant.TenantIDsFromOrgID(userID) + tenantIDs, err := users.TenantIDsFromOrgID(userID) if err != nil { return err } diff --git a/pkg/storage/bucket/bucket_util.go b/pkg/storage/bucket/bucket_util.go index c068d086952..fd1840296ae 100644 --- a/pkg/storage/bucket/bucket_util.go +++ b/pkg/storage/bucket/bucket_util.go @@ -49,3 +49,14 @@ func ListPrefixes(ctx context.Context, bkt objstore.Bucket, prefix string, logge }) return keys, err } + +func IsOneOfTheExpectedErrors(f ...objstore.IsOpFailureExpectedFunc) objstore.IsOpFailureExpectedFunc { + return func(err error) bool { + for _, f := range f { + if f(err) { + return true + } + } + return false + } +} diff --git a/pkg/storage/parquet/converter_marker.go b/pkg/storage/parquet/converter_marker.go index f53b4f53792..380b4244fed 100644 --- a/pkg/storage/parquet/converter_marker.go +++ b/pkg/storage/parquet/converter_marker.go @@ -13,7 +13,7 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" - "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/bucket" ) const ( @@ -28,7 +28,7 @@ type ConverterMark struct { func ReadConverterMark(ctx context.Context, id ulid.ULID, userBkt 
objstore.InstrumentedBucket, logger log.Logger) (*ConverterMark, error) { markerPath := path.Join(id.String(), ConverterMarkerFileName) - reader, err := userBkt.WithExpectedErrs(tsdb.IsOneOfTheExpectedErrors(userBkt.IsAccessDeniedErr, userBkt.IsObjNotFoundErr)).Get(ctx, markerPath) + reader, err := userBkt.WithExpectedErrs(bucket.IsOneOfTheExpectedErrors(userBkt.IsAccessDeniedErr, userBkt.IsObjNotFoundErr)).Get(ctx, markerPath) if err != nil { if userBkt.IsObjNotFoundErr(err) || userBkt.IsAccessDeniedErr(err) { return &ConverterMark{}, nil diff --git a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go index 04c807f6d9d..cdca85bb3ff 100644 --- a/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go +++ b/pkg/storage/tsdb/bucketindex/block_ids_fetcher_test.go @@ -16,8 +16,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util/concurrency" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestBlockIDsFetcher_Fetch(t *testing.T) { diff --git a/pkg/storage/tsdb/bucketindex/loader_test.go b/pkg/storage/tsdb/bucketindex/loader_test.go index 482fc43a6c6..111819cb17f 100644 --- a/pkg/storage/tsdb/bucketindex/loader_test.go +++ b/pkg/storage/tsdb/bucketindex/loader_test.go @@ -17,10 +17,9 @@ import ( "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/storage/bucket" - - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestLoader_GetIndex_ShouldLazyLoadBucketIndex(t *testing.T) { diff --git a/pkg/storage/tsdb/bucketindex/markers_bucket_client_test.go 
b/pkg/storage/tsdb/bucketindex/markers_bucket_client_test.go index b1ec64234c7..42612e20e4b 100644 --- a/pkg/storage/tsdb/bucketindex/markers_bucket_client_test.go +++ b/pkg/storage/tsdb/bucketindex/markers_bucket_client_test.go @@ -16,10 +16,9 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/cortexproject/cortex/pkg/storage/bucket/s3" - "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + "github.com/cortexproject/cortex/pkg/storage/bucket/s3" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestGlobalMarker_ShouldUploadGlobalLocation(t *testing.T) { diff --git a/pkg/storage/tsdb/bucketindex/markers_test.go b/pkg/storage/tsdb/bucketindex/markers_test.go index 039f2f973ff..08cba612d89 100644 --- a/pkg/storage/tsdb/bucketindex/markers_test.go +++ b/pkg/storage/tsdb/bucketindex/markers_test.go @@ -15,7 +15,7 @@ import ( "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestBlockDeletionMarkFilepath(t *testing.T) { diff --git a/pkg/storage/tsdb/bucketindex/storage.go b/pkg/storage/tsdb/bucketindex/storage.go index a884cfd57dc..e0065426f7b 100644 --- a/pkg/storage/tsdb/bucketindex/storage.go +++ b/pkg/storage/tsdb/bucketindex/storage.go @@ -13,8 +13,6 @@ import ( "github.com/pkg/errors" "github.com/thanos-io/objstore" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_errors "github.com/cortexproject/cortex/pkg/util/errors" "github.com/cortexproject/cortex/pkg/util/runutil" @@ -71,7 +69,7 @@ func ReadIndex(ctx context.Context, bkt objstore.Bucket, userID string, cfgProvi userBkt := bucket.NewUserBucketClient(userID, bkt, cfgProvider) // Get the bucket index. 
- reader, err := userBkt.WithExpectedErrs(tsdb.IsOneOfTheExpectedErrors(userBkt.IsAccessDeniedErr, userBkt.IsObjNotFoundErr)).Get(ctx, IndexCompressedFilename) + reader, err := userBkt.WithExpectedErrs(bucket.IsOneOfTheExpectedErrors(userBkt.IsAccessDeniedErr, userBkt.IsObjNotFoundErr)).Get(ctx, IndexCompressedFilename) if err != nil { if userBkt.IsObjNotFoundErr(err) { return nil, ErrIndexNotFound diff --git a/pkg/storage/tsdb/bucketindex/storage_test.go b/pkg/storage/tsdb/bucketindex/storage_test.go index 55f31672e80..595360e4845 100644 --- a/pkg/storage/tsdb/bucketindex/storage_test.go +++ b/pkg/storage/tsdb/bucketindex/storage_test.go @@ -14,12 +14,11 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/bucket/s3" - - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestReadIndex_ShouldReturnErrorIfIndexDoesNotExist(t *testing.T) { - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt, _ := testutil.PrepareFilesystemBucket(t) idx, err := ReadIndex(context.Background(), bkt, "user-1", nil, log.NewNopLogger()) require.Equal(t, ErrIndexNotFound, err) @@ -30,7 +29,7 @@ func TestReadIndex_ShouldReturnErrorIfIndexIsCorrupted(t *testing.T) { const userID = "user-1" ctx := context.Background() - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt, _ := testutil.PrepareFilesystemBucket(t) // Write a corrupted index. 
require.NoError(t, bkt.Upload(ctx, path.Join(userID, IndexCompressedFilename), strings.NewReader("invalid!}"))) @@ -41,11 +40,11 @@ func TestReadIndex_ShouldReturnErrorIfIndexIsCorrupted(t *testing.T) { } func TestReadIndex_ShouldReturnErrorIfKeyAccessDeniedErr(t *testing.T) { - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) - bkt = &cortex_testutil.MockBucketFailure{ + bkt, _ := testutil.PrepareFilesystemBucket(t) + bkt = &testutil.MockBucketFailure{ Bucket: bkt, GetFailures: map[string]error{ - path.Join("user-1", "bucket-index.json.gz"): cortex_testutil.ErrKeyAccessDeniedError, + path.Join("user-1", "bucket-index.json.gz"): testutil.ErrKeyAccessDeniedError, }, } idx, err := ReadIndex(context.Background(), bkt, "user-1", nil, log.NewNopLogger()) @@ -59,13 +58,13 @@ func TestReadIndex_ShouldReturnTheParsedIndexOnSuccess(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt, _ := testutil.PrepareFilesystemBucket(t) // Mock some blocks in the storage. bkt = BucketWithGlobalMarkers(bkt) - cortex_testutil.MockStorageBlock(t, bkt, userID, 10, 20) - cortex_testutil.MockStorageBlock(t, bkt, userID, 20, 30) - cortex_testutil.MockStorageDeletionMark(t, bkt, userID, cortex_testutil.MockStorageBlock(t, bkt, userID, 30, 40)) + testutil.MockStorageBlock(t, bkt, userID, 10, 20) + testutil.MockStorageBlock(t, bkt, userID, 20, 30) + testutil.MockStorageDeletionMark(t, bkt, userID, testutil.MockStorageBlock(t, bkt, userID, 30, 40)) // Write the index. 
u := NewUpdater(bkt, userID, nil, logger) @@ -85,9 +84,9 @@ func TestReadIndex_ShouldRetryUpload(t *testing.T) { ctx := context.Background() logger := log.NewNopLogger() - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt, _ := testutil.PrepareFilesystemBucket(t) - mBucket := &cortex_testutil.MockBucketFailure{ + mBucket := &testutil.MockBucketFailure{ Bucket: bkt, UploadFailures: map[string]error{userID: errors.New("test")}, } @@ -111,7 +110,7 @@ func BenchmarkReadIndex(b *testing.B) { ctx := context.Background() logger := log.NewNopLogger() - bkt, _ := cortex_testutil.PrepareFilesystemBucket(b) + bkt, _ := testutil.PrepareFilesystemBucket(b) // Mock some blocks and deletion marks in the storage. bkt = BucketWithGlobalMarkers(bkt) @@ -119,10 +118,10 @@ func BenchmarkReadIndex(b *testing.B) { minT := int64(i * 10) maxT := int64((i + 1) * 10) - block := cortex_testutil.MockStorageBlock(b, bkt, userID, minT, maxT) + block := testutil.MockStorageBlock(b, bkt, userID, minT, maxT) if i < numBlockDeletionMarks { - cortex_testutil.MockStorageDeletionMark(b, bkt, userID, block) + testutil.MockStorageDeletionMark(b, bkt, userID, block) } } @@ -146,7 +145,7 @@ func BenchmarkReadIndex(b *testing.B) { func TestDeleteIndex_ShouldNotReturnErrorIfIndexDoesNotExist(t *testing.T) { ctx := context.Background() - bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) + bkt, _ := testutil.PrepareFilesystemBucket(t) assert.NoError(t, DeleteIndex(ctx, bkt, "user-1", nil)) } diff --git a/pkg/storage/tsdb/bucketindex/updater.go b/pkg/storage/tsdb/bucketindex/updater.go index fd7efcba4d4..ccc2ade9beb 100644 --- a/pkg/storage/tsdb/bucketindex/updater.go +++ b/pkg/storage/tsdb/bucketindex/updater.go @@ -15,10 +15,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/cortexproject/cortex/pkg/storage/parquet" - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/bucket" + 
"github.com/cortexproject/cortex/pkg/storage/parquet" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/runutil" ) @@ -151,7 +149,7 @@ func (w *Updater) updateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Blo metaFile := path.Join(id.String(), block.MetaFilename) // Get the block's meta.json file. - r, err := w.bkt.ReaderWithExpectedErrs(tsdb.IsOneOfTheExpectedErrors(w.bkt.IsObjNotFoundErr, w.bkt.IsAccessDeniedErr)).Get(ctx, metaFile) + r, err := w.bkt.ReaderWithExpectedErrs(bucket.IsOneOfTheExpectedErrors(w.bkt.IsObjNotFoundErr, w.bkt.IsAccessDeniedErr)).Get(ctx, metaFile) if w.bkt.IsObjNotFoundErr(err) { return nil, ErrBlockMetaNotFound } diff --git a/pkg/storage/tsdb/bucketindex/updater_test.go b/pkg/storage/tsdb/bucketindex/updater_test.go index de2b0e8c16a..25988d6dc3a 100644 --- a/pkg/storage/tsdb/bucketindex/updater_test.go +++ b/pkg/storage/tsdb/bucketindex/updater_test.go @@ -23,7 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/parquet" - "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestUpdater_UpdateIndex(t *testing.T) { diff --git a/pkg/storage/tsdb/caching_bucket.go b/pkg/storage/tsdb/caching_bucket.go index 404438033aa..2d6b936bed4 100644 --- a/pkg/storage/tsdb/caching_bucket.go +++ b/pkg/storage/tsdb/caching_bucket.go @@ -22,6 +22,8 @@ import ( "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/model" storecache "github.com/thanos-io/thanos/pkg/store/cache" + + "github.com/cortexproject/cortex/pkg/util/users" ) var ( @@ -273,7 +275,7 @@ func CreateCachingBucketForCompactor(metadataConfig MetadataCacheConfig, cleaner matchers := NewMatchers() // Do not cache block deletion marker for compactor matchers.SetMetaFileMatcher(func(name string) bool { - return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, 
"/"+TenantDeletionMarkFile) + return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+users.TenantDeletionMarkFile) }) cfg := cache.NewCachingBucketConfig() cachingConfigured := false @@ -463,7 +465,7 @@ func isParquetChunkFile(name string) bool { return strings.HasSuffix(name, "chun func isParquetLabelsFile(name string) bool { return strings.HasSuffix(name, "labels.parquet") } func isMetaFile(name string) bool { - return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) || strings.HasSuffix(name, "/"+TenantDeletionMarkFile) + return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) || strings.HasSuffix(name, "/"+users.TenantDeletionMarkFile) } func isBlockIndexFile(name string) bool { diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index b51ad077bd4..0a3565c0740 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -18,6 +18,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/util/flagext" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -67,10 +68,10 @@ var ( // //nolint:revive type BlocksStorageConfig struct { - Bucket bucket.Config `yaml:",inline"` - BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."` - TSDB TSDBConfig `yaml:"tsdb"` - UsersScanner UsersScannerConfig `yaml:"users_scanner"` + Bucket bucket.Config `yaml:",inline"` + BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."` + TSDB TSDBConfig `yaml:"tsdb"` + UsersScanner users.UsersScannerConfig `yaml:"users_scanner"` } // DurationList is the 
block ranges for a tsdb @@ -115,7 +116,7 @@ func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet) { cfg.Bucket.RegisterFlagsWithPrefix("blocks-storage.", f) cfg.BucketStore.RegisterFlags(f) cfg.TSDB.RegisterFlags(f) - cfg.UsersScanner.RegisterFlagsWithPrefix("blocks-storage", f) + cfg.UsersScanner.RegisterFlagsWithPrefix("blocks-storage.", f) } // Validate the config. diff --git a/pkg/storage/tsdb/users_scanner_config.go b/pkg/storage/tsdb/users_scanner_config.go deleted file mode 100644 index 5b556d9e4e4..00000000000 --- a/pkg/storage/tsdb/users_scanner_config.go +++ /dev/null @@ -1,47 +0,0 @@ -package tsdb - -import ( - "flag" - "fmt" - "strings" - "time" - - "github.com/pkg/errors" -) - -type UsersScannerConfig struct { - Strategy string `yaml:"strategy"` - MaxStalePeriod time.Duration `yaml:"max_stale_period"` - CacheTTL time.Duration `yaml:"cache_ttl"` -} - -const ( - UserScanStrategyList = "list" - UserScanStrategyUserIndex = "user_index" -) - -var ( - ErrInvalidUserScannerStrategy = errors.New("invalid user scanner strategy") - ErrInvalidMaxStalePeriod = errors.New("max stale period must be positive") - ErrInvalidCacheTTL = errors.New("cache TTL must be >= 0") - supportedStrategies = []string{UserScanStrategyList, UserScanStrategyUserIndex} -) - -func (c *UsersScannerConfig) Validate() error { - if c.Strategy != UserScanStrategyList && c.Strategy != UserScanStrategyUserIndex { - return ErrInvalidUserScannerStrategy - } - if c.Strategy == UserScanStrategyUserIndex && c.MaxStalePeriod <= 0 { - return ErrInvalidMaxStalePeriod - } - if c.CacheTTL < 0 { - return ErrInvalidCacheTTL - } - return nil -} - -func (c *UsersScannerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&c.Strategy, prefix+".users-scanner.strategy", UserScanStrategyList, fmt.Sprintf("Strategy to use to scan users. 
Supported values are: %s.", strings.Join(supportedStrategies, ", "))) - f.DurationVar(&c.MaxStalePeriod, prefix+".users-scanner.user-index.max-stale-period", time.Hour, "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.") - f.DurationVar(&c.CacheTTL, prefix+".users-scanner.cache-ttl", 0, "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.") -} diff --git a/pkg/storage/tsdb/util.go b/pkg/storage/tsdb/util.go index e9a70074266..8b10403c09e 100644 --- a/pkg/storage/tsdb/util.go +++ b/pkg/storage/tsdb/util.go @@ -2,7 +2,6 @@ package tsdb import ( "github.com/oklog/ulid/v2" - "github.com/thanos-io/objstore" "github.com/cortexproject/cortex/pkg/ingester/client" ) @@ -16,14 +15,3 @@ func HashBlockID(id ulid.ULID) uint32 { } return h } - -func IsOneOfTheExpectedErrors(f ...objstore.IsOpFailureExpectedFunc) objstore.IsOpFailureExpectedFunc { - return func(err error) bool { - for _, f := range f { - if f(err) { - return true - } - } - return false - } -} diff --git a/pkg/storegateway/bucket_index_metadata_fetcher_test.go b/pkg/storegateway/bucket_index_metadata_fetcher_test.go index 8bd23eaa44a..859570f8ce4 100644 --- a/pkg/storegateway/bucket_index_metadata_fetcher_test.go +++ b/pkg/storegateway/bucket_index_metadata_fetcher_test.go @@ -21,8 +21,8 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" "github.com/cortexproject/cortex/pkg/util/concurrency" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestBucketIndexMetadataFetcher_Fetch(t *testing.T) { diff --git a/pkg/storegateway/bucket_stores.go b/pkg/storegateway/bucket_stores.go index b9da057ae23..b5633646811 100644 --- a/pkg/storegateway/bucket_stores.go +++ b/pkg/storegateway/bucket_stores.go @@ -35,12 +35,12 @@ 
import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/backoff" cortex_errors "github.com/cortexproject/cortex/pkg/util/errors" util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 831b7afb2b4..a34c7e39af6 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -43,10 +43,9 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestBucketStores_CustomerKeyError(t *testing.T) { @@ -400,14 +399,14 @@ func TestBucketStores_syncUsersBlocks(t *testing.T) { tests := map[string]struct { shardingStrategy ShardingStrategy expectedStores int32 - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants }{ "when sharding is disabled all users should be synced": { shardingStrategy: NewNoShardingStrategy(log.NewNopLogger(), nil), expectedStores: 3, }, "sharding disabled, user-1 disabled": { - shardingStrategy: NewNoShardingStrategy(log.NewNopLogger(), util.NewAllowedTenants(nil, []string{"user-1"})), + shardingStrategy: NewNoShardingStrategy(log.NewNopLogger(), 
users.NewAllowedTenants(nil, []string{"user-1"})), expectedStores: 2, }, "when sharding is enabled only stores for filtered users should be created": { @@ -427,16 +426,16 @@ func TestBucketStores_syncUsersBlocks(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", allUsers, nil) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) bucketClient.MockIter("user-1/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-1", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-1", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-1", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-1", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-2/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-2", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-2", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-2", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-2", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-3/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-3", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-3", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-3", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-3", "markers", users.TenantDeletionMarkFile), false, nil) stores, err := NewBucketStores(cfg, testData.shardingStrategy, bucketClient, 
defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil) require.NoError(t, err) diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index 96eb8c31cca..bcc2cd3cf3d 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -30,6 +30,7 @@ import ( util_limiter "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -168,7 +169,7 @@ func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConf Help: "Total number of times the bucket sync operation triggered.", }, []string{"reason"}), } - allowedTenants := util.NewAllowedTenants(gatewayCfg.EnabledTenants, gatewayCfg.DisabledTenants) + allowedTenants := users.NewAllowedTenants(gatewayCfg.EnabledTenants, gatewayCfg.DisabledTenants) // Init metrics. g.bucketSync.WithLabelValues(syncReasonInitial) diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 6dcc8cd9913..bdb06320e19 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -38,14 +38,14 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket/filesystem" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" util_limiter "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/resource" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/test" + "github.com/cortexproject/cortex/pkg/util/testutil" + "github.com/cortexproject/cortex/pkg/util/users" 
"github.com/cortexproject/cortex/pkg/util/validation" ) @@ -164,16 +164,16 @@ func TestStoreGateway_InitialSyncWithDefaultShardingEnabled(t *testing.T) { assert.Equal(t, RingNumTokens, len(g.ringLifecycler.GetTokens())) assert.Subset(t, g.ringLifecycler.GetTokens(), testData.initialTokens) }) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) bucketClient.MockIter("user-1/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-1", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-1", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-1", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-1", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-2/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-2", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-2", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-2", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-2", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-disabled/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-disabled", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-disabled", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-disabled", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-disabled", "markers", users.TenantDeletionMarkFile), false, nil) // Once successfully started, the instance should be ACTIVE in the ring. 
require.NoError(t, services.StartAndAwaitRunning(ctx, g)) @@ -205,16 +205,16 @@ func TestStoreGateway_InitialSyncWithShardingDisabled(t *testing.T) { defer services.StopAndAwaitTerminated(ctx, g) //nolint:errcheck bucketClient.MockIter("", []string{"user-1", "user-2", "user-disabled"}, nil) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) bucketClient.MockIter("user-1/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-1", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-1", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-1", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-1", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-2/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-2", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-2", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-2", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-2", "markers", users.TenantDeletionMarkFile), false, nil) bucketClient.MockIter("user-disabled/", []string{}, nil) - bucketClient.MockExists(path.Join(tenant.GlobalMarkersDir, "user-disabled", cortex_tsdb.TenantDeletionMarkFile), false, nil) - bucketClient.MockExists(path.Join("user-disabled", "markers", cortex_tsdb.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join(users.GlobalMarkersDir, "user-disabled", users.TenantDeletionMarkFile), false, nil) + bucketClient.MockExists(path.Join("user-disabled", "markers", users.TenantDeletionMarkFile), false, nil) require.NoError(t, services.StartAndAwaitRunning(ctx, g)) assert.NotNil(t, 
g.stores.getStore("user-1")) @@ -254,7 +254,7 @@ func TestStoreGateway_InitialSyncFailure(t *testing.T) { // at the same time, they will join the ring at a slightly different time). func TestStoreGateway_InitialSyncWithWaitRingStability(t *testing.T) { //parallel testing causes data race - bucketClient, storageDir := cortex_testutil.PrepareFilesystemBucket(t) + bucketClient, storageDir := testutil.PrepareFilesystemBucket(t) // This tests uses real TSDB blocks. 24h time range, 2h block range period, // 2 users = total (24 / 12) * 2 = 24 blocks. @@ -425,7 +425,7 @@ func TestStoreGateway_BlocksSyncWithDefaultSharding_RingTopologyChangedAfterScal expectedBlocksLoaded = 3 * numBlocks // blocks are replicated 3 times ) - bucketClient, storageDir := cortex_testutil.PrepareFilesystemBucket(t) + bucketClient, storageDir := testutil.PrepareFilesystemBucket(t) // This tests uses real TSDB blocks. 24h time range, 2h block range period, // 2 users = total (24 / 12) * 2 = 24 blocks. @@ -615,7 +615,7 @@ func TestStoreGateway_ShouldSupportLoadRingTokensFromFile(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) @@ -825,7 +825,7 @@ func TestStoreGateway_SyncOnRingTopologyChanged(t *testing.T) { bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), reg, nil) require.NoError(t, err) @@ -888,7 +888,7 @@ func 
TestStoreGateway_RingLifecyclerShouldAutoForgetUnhealthyInstances(t *testin bucketClient := &bucket.ClientMock{} bucketClient.MockIter("", []string{}, nil) - bucketClient.MockIter(tenant.GlobalMarkersDir, []string{}, nil) + bucketClient.MockIter(users.GlobalMarkersDir, []string{}, nil) g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), mockLoggingLevel(), log.NewNopLogger(), nil, nil) require.NoError(t, err) diff --git a/pkg/storegateway/metadata_fetcher_filters_test.go b/pkg/storegateway/metadata_fetcher_filters_test.go index da46caf4263..26832fb5854 100644 --- a/pkg/storegateway/metadata_fetcher_filters_test.go +++ b/pkg/storegateway/metadata_fetcher_filters_test.go @@ -23,7 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestIgnoreDeletionMarkFilter_Filter(t *testing.T) { diff --git a/pkg/storegateway/sharding_strategy.go b/pkg/storegateway/sharding_strategy.go index eb8ac75f6e9..b38af6f4e88 100644 --- a/pkg/storegateway/sharding_strategy.go +++ b/pkg/storegateway/sharding_strategy.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/users" ) const ( @@ -44,7 +45,7 @@ type ShardingLimits interface { StoreGatewayTenantShardSize(userID string) float64 } -func filterDisallowedTenants(userIDs []string, logger log.Logger, allowedTenants *util.AllowedTenants) []string { +func filterDisallowedTenants(userIDs []string, logger log.Logger, allowedTenants *users.AllowedTenants) []string { filteredUserIDs := []string{} for _, userID := range userIDs { if !allowedTenants.IsAllowed(userID) { @@ -61,10 +62,10 @@ func 
filterDisallowedTenants(userIDs []string, logger log.Logger, allowedTenants // NoShardingStrategy is a no-op strategy. When this strategy is used, no tenant/block is filtered out. type NoShardingStrategy struct { logger log.Logger - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants } -func NewNoShardingStrategy(logger log.Logger, allowedTenants *util.AllowedTenants) *NoShardingStrategy { +func NewNoShardingStrategy(logger log.Logger, allowedTenants *users.AllowedTenants) *NoShardingStrategy { return &NoShardingStrategy{ logger: logger, allowedTenants: allowedTenants, @@ -89,11 +90,11 @@ type DefaultShardingStrategy struct { r *ring.Ring instanceAddr string logger log.Logger - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants } // NewDefaultShardingStrategy creates DefaultShardingStrategy. -func NewDefaultShardingStrategy(r *ring.Ring, instanceAddr string, logger log.Logger, allowedTenants *util.AllowedTenants) *DefaultShardingStrategy { +func NewDefaultShardingStrategy(r *ring.Ring, instanceAddr string, logger log.Logger, allowedTenants *users.AllowedTenants) *DefaultShardingStrategy { return &DefaultShardingStrategy{ r: r, instanceAddr: instanceAddr, @@ -135,11 +136,11 @@ type ShuffleShardingStrategy struct { logger log.Logger zoneStableShuffleSharding bool - allowedTenants *util.AllowedTenants + allowedTenants *users.AllowedTenants } // NewShuffleShardingStrategy makes a new ShuffleShardingStrategy. 
-func NewShuffleShardingStrategy(r *ring.Ring, instanceID, instanceAddr string, limits ShardingLimits, logger log.Logger, allowedTenants *util.AllowedTenants, zoneStableShuffleSharding bool) *ShuffleShardingStrategy { +func NewShuffleShardingStrategy(r *ring.Ring, instanceID, instanceAddr string, limits ShardingLimits, logger log.Logger, allowedTenants *users.AllowedTenants, zoneStableShuffleSharding bool) *ShuffleShardingStrategy { return &ShuffleShardingStrategy{ r: r, instanceID: instanceID, diff --git a/pkg/storegateway/sharding_strategy_test.go b/pkg/storegateway/sharding_strategy_test.go index 2f2e002e09c..744d9e751bd 100644 --- a/pkg/storegateway/sharding_strategy_test.go +++ b/pkg/storegateway/sharding_strategy_test.go @@ -22,8 +22,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/consul" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/users" ) func TestDefaultShardingStrategy(t *testing.T) { @@ -646,9 +646,9 @@ func TestShuffleShardingStrategy(t *testing.T) { // Wait until the ring client has synced. require.NoError(t, ring.WaitInstanceState(ctx, r, "instance-1", ring.ACTIVE)) - var allowedTenants *util.AllowedTenants + var allowedTenants *users.AllowedTenants if testData.isTenantDisabled { - allowedTenants = util.NewAllowedTenants(nil, []string{userID}) + allowedTenants = users.NewAllowedTenants(nil, []string{userID}) } // Assert on filter users. 
diff --git a/pkg/util/errors/errors.go b/pkg/util/errors/errors.go index 141462b531e..574809c248f 100644 --- a/pkg/util/errors/errors.go +++ b/pkg/util/errors/errors.go @@ -1,6 +1,8 @@ package errors -import "errors" +import ( + "errors" +) type errWithCause struct { error diff --git a/pkg/util/log/wrappers.go b/pkg/util/log/wrappers.go index 9a706a570e5..701c2256697 100644 --- a/pkg/util/log/wrappers.go +++ b/pkg/util/log/wrappers.go @@ -8,8 +8,8 @@ import ( "github.com/uber/jaeger-client-go" "go.opentelemetry.io/otel/trace" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/requestmeta" + "github.com/cortexproject/cortex/pkg/util/users" ) // WithUserID returns a Logger that has information about the current user in @@ -44,7 +44,7 @@ func WithContext(ctx context.Context, l log.Logger) log.Logger { // Weaveworks uses "orgs" and "orgID" to represent Cortex users, // even though the code-base generally uses `userID` to refer to the same thing. - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err == nil { l = WithUserID(userID, l) } diff --git a/pkg/util/push/otlp.go b/pkg/util/push/otlp.go index cdf1259d122..179e1d3d8fd 100644 --- a/pkg/util/push/otlp.go +++ b/pkg/util/push/otlp.go @@ -23,9 +23,9 @@ import ( "github.com/cortexproject/cortex/pkg/cortexpb" "github.com/cortexproject/cortex/pkg/distributor" - "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -47,7 +47,7 @@ func OTLPHandler(maxRecvMsgSize int, overrides *validation.Overrides, cfg distri } } - userID, err := tenant.TenantID(ctx) + userID, err := users.TenantID(ctx) if err != nil { return } diff --git a/pkg/util/spanlogger/spanlogger.go b/pkg/util/spanlogger/spanlogger.go index cde7ae045ab..aa78dbea7f3 100644 --- 
a/pkg/util/spanlogger/spanlogger.go +++ b/pkg/util/spanlogger/spanlogger.go @@ -9,8 +9,8 @@ import ( "github.com/opentracing/opentracing-go/ext" otlog "github.com/opentracing/opentracing-go/log" - "github.com/cortexproject/cortex/pkg/tenant" util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/users" ) type loggerCtxMarker struct{} @@ -39,7 +39,7 @@ func New(ctx context.Context, method string, kvps ...any) (*SpanLogger, context. // retrieved with FromContext or FromContextWithFallback. func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...any) (*SpanLogger, context.Context) { span, ctx := opentracing.StartSpanFromContext(ctx, method) - if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 { + if ids, _ := users.TenantIDs(ctx); len(ids) > 0 { span.SetTag(TenantIDTagName, ids) } logger := &SpanLogger{ diff --git a/pkg/storage/tsdb/testutil/block_mock.go b/pkg/util/testutil/block_mock.go similarity index 100% rename from pkg/storage/tsdb/testutil/block_mock.go rename to pkg/util/testutil/block_mock.go diff --git a/pkg/storage/tsdb/testutil/objstore.go b/pkg/util/testutil/objstore.go similarity index 100% rename from pkg/storage/tsdb/testutil/objstore.go rename to pkg/util/testutil/objstore.go diff --git a/pkg/util/active_user.go b/pkg/util/users/active_user.go similarity index 99% rename from pkg/util/active_user.go rename to pkg/util/users/active_user.go index 2fde25ed123..ebc80a463e9 100644 --- a/pkg/util/active_user.go +++ b/pkg/util/users/active_user.go @@ -1,4 +1,4 @@ -package util +package users import ( "context" diff --git a/pkg/util/active_user_test.go b/pkg/util/users/active_user_test.go similarity index 99% rename from pkg/util/active_user_test.go rename to pkg/util/users/active_user_test.go index 60e97f6dbc8..4c92f090f66 100644 --- a/pkg/util/active_user_test.go +++ b/pkg/util/users/active_user_test.go @@ -1,4 +1,4 @@ -package util +package users import ( "fmt" diff --git 
a/pkg/util/allowed_tenants.go b/pkg/util/users/allowed_tenants.go similarity index 93% rename from pkg/util/allowed_tenants.go rename to pkg/util/users/allowed_tenants.go index ff406b381b8..18f68f18e25 100644 --- a/pkg/util/allowed_tenants.go +++ b/pkg/util/users/allowed_tenants.go @@ -1,6 +1,4 @@ -package util - -import "github.com/cortexproject/cortex/pkg/tenant" +package users // AllowedTenants that can answer whether tenant is allowed or not based on configuration. // Default value (nil) allows all tenants. @@ -36,7 +34,7 @@ func NewAllowedTenants(enabled []string, disabled []string) *AllowedTenants { } func (a *AllowedTenants) IsAllowed(tenantID string) bool { - if tenantID == tenant.GlobalMarkersDir { + if tenantID == GlobalMarkersDir { // __markers__ is reserved for global markers and no tenant should be allowed to have that name. return false } diff --git a/pkg/util/allowed_tenants_test.go b/pkg/util/users/allowed_tenants_test.go similarity index 98% rename from pkg/util/allowed_tenants_test.go rename to pkg/util/users/allowed_tenants_test.go index da49c8f028c..53c5a7e5879 100644 --- a/pkg/util/allowed_tenants_test.go +++ b/pkg/util/users/allowed_tenants_test.go @@ -1,4 +1,4 @@ -package util +package users import ( "testing" diff --git a/pkg/storage/tsdb/users/cache.go b/pkg/util/users/cache.go similarity index 90% rename from pkg/storage/tsdb/users/cache.go rename to pkg/util/users/cache.go index 54414329a2c..a215669f9b8 100644 --- a/pkg/storage/tsdb/users/cache.go +++ b/pkg/util/users/cache.go @@ -7,8 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/cortexproject/cortex/pkg/storage/tsdb" ) // cachedScanner is a scanner that caches the result of the underlying scanner. 
@@ -25,7 +23,7 @@ type cachedScanner struct { hits prometheus.Counter } -func newCachedScanner(scanner Scanner, cfg tsdb.UsersScannerConfig, reg prometheus.Registerer) *cachedScanner { +func newCachedScanner(scanner Scanner, cfg UsersScannerConfig, reg prometheus.Registerer) *cachedScanner { return &cachedScanner{ scanner: scanner, ttl: cfg.CacheTTL, diff --git a/pkg/storage/tsdb/users/cache_test.go b/pkg/util/users/cache_test.go similarity index 94% rename from pkg/storage/tsdb/users/cache_test.go rename to pkg/util/users/cache_test.go index 6ef5588c922..35f1e75e22f 100644 --- a/pkg/storage/tsdb/users/cache_test.go +++ b/pkg/util/users/cache_test.go @@ -8,8 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/cortexproject/cortex/pkg/storage/tsdb" ) func TestCachedScanner_ScanUsers(t *testing.T) { @@ -73,7 +71,7 @@ func TestCachedScanner_ScanUsers(t *testing.T) { t.Parallel() reg := prometheus.NewRegistry() - cachedScanner := newCachedScanner(testData.scanner, tsdb.UsersScannerConfig{ + cachedScanner := newCachedScanner(testData.scanner, UsersScannerConfig{ CacheTTL: testData.ttl, }, reg) @@ -123,7 +121,7 @@ func TestCachedScanner_ConcurrentAccess(t *testing.T) { deleted: []string{"user-3"}, } - cachedScanner := newCachedScanner(scanner, tsdb.UsersScannerConfig{ + cachedScanner := newCachedScanner(scanner, UsersScannerConfig{ CacheTTL: 1 * time.Hour, }, reg) diff --git a/pkg/storage/tsdb/users/index.go b/pkg/util/users/index.go similarity index 100% rename from pkg/storage/tsdb/users/index.go rename to pkg/util/users/index.go diff --git a/pkg/storage/tsdb/users/index_test.go b/pkg/util/users/index_test.go similarity index 97% rename from pkg/storage/tsdb/users/index_test.go rename to pkg/util/users/index_test.go index 7fcf235466b..4b34535e100 100644 --- a/pkg/storage/tsdb/users/index_test.go +++ b/pkg/util/users/index_test.go @@ -10,7 +10,7 @@ import ( 
"github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) func TestWriteAndReadUserIndex(t *testing.T) { diff --git a/pkg/storage/tsdb/users/updater.go b/pkg/util/users/index_updater.go similarity index 69% rename from pkg/storage/tsdb/users/updater.go rename to pkg/util/users/index_updater.go index b6f185348b2..bac9a89036a 100644 --- a/pkg/storage/tsdb/users/updater.go +++ b/pkg/util/users/index_updater.go @@ -10,16 +10,18 @@ import ( ) type UserIndexUpdater struct { - bkt objstore.InstrumentedBucket - scanner Scanner + bkt objstore.InstrumentedBucket + cleanupInterval time.Duration + scanner Scanner userIndexLastUpdated prometheus.Gauge } -func NewUserIndexUpdater(bkt objstore.InstrumentedBucket, scanner Scanner, reg prometheus.Registerer) *UserIndexUpdater { +func NewUserIndexUpdater(bkt objstore.InstrumentedBucket, cleanupInterval time.Duration, scanner Scanner, reg prometheus.Registerer) *UserIndexUpdater { return &UserIndexUpdater{ - bkt: bkt, - scanner: scanner, + bkt: bkt, + cleanupInterval: cleanupInterval, + scanner: scanner, userIndexLastUpdated: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "cortex_user_index_last_successful_update_timestamp_seconds", Help: "Timestamp of the last successful update of user index.", @@ -27,6 +29,10 @@ func NewUserIndexUpdater(bkt objstore.InstrumentedBucket, scanner Scanner, reg p } } +func (u *UserIndexUpdater) GetCleanUpInterval() time.Duration { + return u.cleanupInterval +} + func (u *UserIndexUpdater) UpdateUserIndex(ctx context.Context) error { active, deleting, deleted, err := u.scanner.ScanUsers(ctx) if err != nil { diff --git a/pkg/storage/tsdb/users/updater_test.go b/pkg/util/users/index_updater_test.go similarity index 92% rename from pkg/storage/tsdb/users/updater_test.go rename to pkg/util/users/index_updater_test.go 
index c5273c8e039..0b4a6a68e9c 100644 --- a/pkg/storage/tsdb/users/updater_test.go +++ b/pkg/util/users/index_updater_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/storage/bucket" - cortex_testutil "github.com/cortexproject/cortex/pkg/storage/tsdb/testutil" + cortex_testutil "github.com/cortexproject/cortex/pkg/util/testutil" ) type mockScanner struct { @@ -77,7 +77,7 @@ func TestUserIndexUpdater_UpdateUserIndex(t *testing.T) { t.Parallel() bkt, _ := cortex_testutil.PrepareFilesystemBucket(t) - updater := NewUserIndexUpdater(bkt, testData.scanner, nil) + updater := NewUserIndexUpdater(bkt, defaultCleanUpInterval, testData.scanner, nil) err := updater.UpdateUserIndex(ctx) if testData.expectErr { @@ -118,7 +118,7 @@ func TestUserIndexUpdater_UpdateUserIndex_WriteError(t *testing.T) { // Mock the bucket to return an error on upload bkt.MockUpload(UserIndexCompressedFilename, assert.AnError) - updater := NewUserIndexUpdater(bkt, scanner, nil) + updater := NewUserIndexUpdater(bkt, defaultCleanUpInterval, scanner, nil) err := updater.UpdateUserIndex(ctx) require.Error(t, err) assert.Contains(t, err.Error(), "upload user index") diff --git a/pkg/tenant/resolver.go b/pkg/util/users/resolver.go similarity index 99% rename from pkg/tenant/resolver.go rename to pkg/util/users/resolver.go index 3505030c674..0390dd45d85 100644 --- a/pkg/tenant/resolver.go +++ b/pkg/util/users/resolver.go @@ -1,4 +1,4 @@ -package tenant +package users import ( "context" diff --git a/pkg/tenant/resolver_test.go b/pkg/util/users/resolver_test.go similarity index 99% rename from pkg/tenant/resolver_test.go rename to pkg/util/users/resolver_test.go index e29bde51b31..0b852d408b7 100644 --- a/pkg/tenant/resolver_test.go +++ b/pkg/util/users/resolver_test.go @@ -1,4 +1,4 @@ -package tenant +package users import ( "context" diff --git a/pkg/storage/tsdb/users/scanner.go b/pkg/util/users/scanner.go similarity index 89% rename from 
pkg/storage/tsdb/users/scanner.go rename to pkg/util/users/scanner.go index f77de5b4f6f..7ea45f8999c 100644 --- a/pkg/storage/tsdb/users/scanner.go +++ b/pkg/util/users/scanner.go @@ -13,13 +13,10 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/thanos-io/objstore" - - "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/tenant" ) var ( - userIDsToSkip = []string{tenant.GlobalMarkersDir, UserIndexCompressedFilename} + userIDsToSkip = []string{GlobalMarkersDir, UserIndexCompressedFilename} ) type Scanner interface { @@ -29,15 +26,15 @@ type Scanner interface { ScanUsers(ctx context.Context) (active, deleting, deleted []string, err error) } -func NewScanner(cfg tsdb.UsersScannerConfig, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) (Scanner, error) { +func NewScanner(cfg UsersScannerConfig, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) (Scanner, error) { var scanner Scanner switch cfg.Strategy { - case tsdb.UserScanStrategyList: + case UserScanStrategyList: scanner = &listScanner{bkt: bkt} - case tsdb.UserScanStrategyUserIndex: + case UserScanStrategyUserIndex: scanner = newUserIndexScanner(&listScanner{bkt: bkt}, cfg, bkt, logger, reg) default: - return nil, tsdb.ErrInvalidUserScannerStrategy + return nil, ErrInvalidUserScannerStrategy } if cfg.CacheTTL > 0 { @@ -78,7 +75,7 @@ func (s *listScanner) ScanUsers(ctx context.Context) (active, deleting, deleted } // Scan users from the __markers__ directory. 
- err = s.bkt.Iter(ctx, tenant.GlobalMarkersDir, func(entry string) error { + err = s.bkt.Iter(ctx, GlobalMarkersDir, func(entry string) error { // entry will be of the form __markers__// parts := strings.Split(entry, objstore.DirDelim) userID := parts[1] @@ -92,7 +89,7 @@ func (s *listScanner) ScanUsers(ctx context.Context) (active, deleting, deleted for userID := range scannedActiveUsers { // Tenant deletion mark could exist in local path for legacy code. // If tenant deletion mark exists but user ID prefix exists in the bucket, mark it as deleting. - if deletionMarkExists, err := tsdb.TenantDeletionMarkExists(ctx, s.bkt, userID); err == nil && deletionMarkExists { + if deletionMarkExists, err := TenantDeletionMarkExists(ctx, s.bkt, userID); err == nil && deletionMarkExists { deletingUsers[userID] = struct{}{} continue } @@ -131,7 +128,7 @@ type userIndexScanner struct { userIndexUpdateDelay prometheus.Gauge } -func newUserIndexScanner(baseScanner Scanner, cfg tsdb.UsersScannerConfig, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) *userIndexScanner { +func newUserIndexScanner(baseScanner Scanner, cfg UsersScannerConfig, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) *userIndexScanner { return &userIndexScanner{ bkt: bkt, logger: logger, diff --git a/pkg/util/users/scanner_config.go b/pkg/util/users/scanner_config.go new file mode 100644 index 00000000000..8e39a633c74 --- /dev/null +++ b/pkg/util/users/scanner_config.go @@ -0,0 +1,51 @@ +package users + +import ( + "flag" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" +) + +type UsersScannerConfig struct { + Strategy string `yaml:"strategy"` + MaxStalePeriod time.Duration `yaml:"max_stale_period"` + CleanUpInterval time.Duration `yaml:"clean_up_interval"` + CacheTTL time.Duration `yaml:"cache_ttl"` +} + +const ( + UserScanStrategyList = "list" + UserScanStrategyUserIndex = "user_index" + + defaultCleanUpInterval = time.Minute * 15 +) + +var 
( + ErrInvalidUserScannerStrategy = errors.New("invalid user scanner strategy") + ErrInvalidMaxStalePeriod = errors.New("max stale period must be positive") + ErrInvalidCacheTTL = errors.New("cache TTL must be >= 0") + supportedStrategies = []string{UserScanStrategyList, UserScanStrategyUserIndex} +) + +func (c *UsersScannerConfig) Validate() error { + if c.Strategy != UserScanStrategyList && c.Strategy != UserScanStrategyUserIndex { + return ErrInvalidUserScannerStrategy + } + if c.Strategy == UserScanStrategyUserIndex && c.MaxStalePeriod <= 0 { + return ErrInvalidMaxStalePeriod + } + if c.CacheTTL < 0 { + return ErrInvalidCacheTTL + } + return nil +} + +func (c *UsersScannerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&c.Strategy, prefix+"users-scanner.strategy", UserScanStrategyList, fmt.Sprintf("Strategy to use to scan users. Supported values are: %s.", strings.Join(supportedStrategies, ", "))) + f.DurationVar(&c.MaxStalePeriod, prefix+"users-scanner.user-index.max-stale-period", time.Hour, "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.") + f.DurationVar(&c.CleanUpInterval, prefix+"users-scanner.user-index.cleanup-interval", defaultCleanUpInterval, fmt.Sprintf("How frequently user index file is updated, it only takes effect when user scan strategy is %s.", UserScanStrategyUserIndex)) + f.DurationVar(&c.CacheTTL, prefix+"users-scanner.cache-ttl", 0, "TTL of the cached users. 
0 disables caching and relies on caching at bucket client level.") +} diff --git a/pkg/storage/tsdb/users_scanner_config_test.go b/pkg/util/users/scanner_config_test.go similarity index 99% rename from pkg/storage/tsdb/users_scanner_config_test.go rename to pkg/util/users/scanner_config_test.go index 9e6d20a37c7..ed55d1c3d0d 100644 --- a/pkg/storage/tsdb/users_scanner_config_test.go +++ b/pkg/util/users/scanner_config_test.go @@ -1,4 +1,4 @@ -package tsdb +package users import ( "testing" diff --git a/pkg/storage/tsdb/users/scanner_test.go b/pkg/util/users/scanner_test.go similarity index 90% rename from pkg/storage/tsdb/users/scanner_test.go rename to pkg/util/users/scanner_test.go index 6f906237346..96b73add809 100644 --- a/pkg/storage/tsdb/users/scanner_test.go +++ b/pkg/util/users/scanner_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/require" "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/storage/tsdb" ) func TestListScanner_ScanUsers(t *testing.T) { @@ -36,12 +35,12 @@ func TestListScanner_ScanUsers(t *testing.T) { // Marked for deletion users b.MockIter("__markers__", []string{"__markers__/user-1/", "__markers__/user-4/", "__markers__/user-5/"}, nil) // Deletion marks - b.MockExists(tsdb.GetGlobalDeletionMarkPath("user-1"), true, nil) - b.MockExists(tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) - b.MockExists(tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - b.MockExists(tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) - b.MockExists(tsdb.GetGlobalDeletionMarkPath("user-3"), false, nil) - b.MockExists(tsdb.GetLocalDeletionMarkPath("user-3"), false, nil) + b.MockExists(GetGlobalDeletionMarkPath("user-1"), true, nil) + b.MockExists(GetLocalDeletionMarkPath("user-1"), false, nil) + b.MockExists(GetGlobalDeletionMarkPath("user-2"), false, nil) + b.MockExists(GetLocalDeletionMarkPath("user-2"), false, nil) + b.MockExists(GetGlobalDeletionMarkPath("user-3"), false, nil) + 
b.MockExists(GetLocalDeletionMarkPath("user-3"), false, nil) }, expectedActive: []string{"user-2", "user-3"}, expectedDeleting: []string{"user-1"}, @@ -160,8 +159,8 @@ func TestUserIndexScanner_ScanUsers(t *testing.T) { // Base scanner results b.MockIter("", []string{"user-2/"}, nil) b.MockIter("__markers__", []string{}, nil) - b.MockExists(tsdb.GetGlobalDeletionMarkPath("user-2"), false, nil) - b.MockExists(tsdb.GetLocalDeletionMarkPath("user-2"), false, nil) + b.MockExists(GetGlobalDeletionMarkPath("user-2"), false, nil) + b.MockExists(GetLocalDeletionMarkPath("user-2"), false, nil) }, maxStalePeriod: 1 * time.Hour, expectedActive: []string{"user-2"}, @@ -174,8 +173,8 @@ func TestUserIndexScanner_ScanUsers(t *testing.T) { b.MockGet(UserIndexCompressedFilename, "", errors.New("failed to read index")) b.MockIter("", []string{"user-1/"}, nil) b.MockIter("__markers__", []string{}, nil) - b.MockExists(tsdb.GetGlobalDeletionMarkPath("user-1"), false, nil) - b.MockExists(tsdb.GetLocalDeletionMarkPath("user-1"), false, nil) + b.MockExists(GetGlobalDeletionMarkPath("user-1"), false, nil) + b.MockExists(GetLocalDeletionMarkPath("user-1"), false, nil) }, maxStalePeriod: 1 * time.Hour, expectedActive: []string{"user-1"}, @@ -193,7 +192,7 @@ func TestUserIndexScanner_ScanUsers(t *testing.T) { testData.bucketSetup(bucketClient) baseScanner := &listScanner{bkt: bucketClient} - scanner := newUserIndexScanner(baseScanner, tsdb.UsersScannerConfig{ + scanner := newUserIndexScanner(baseScanner, UsersScannerConfig{ MaxStalePeriod: testData.maxStalePeriod, }, bucketClient, logger, nil) diff --git a/pkg/util/users/shard.go b/pkg/util/users/shard.go new file mode 100644 index 00000000000..9c2bd961aa3 --- /dev/null +++ b/pkg/util/users/shard.go @@ -0,0 +1,10 @@ +package users + +import "hash/fnv" + +func ShardByUser(userID string) uint32 { + ringHasher := fnv.New32a() + // Hasher never returns err. 
+ _, _ = ringHasher.Write([]byte(userID)) + return ringHasher.Sum32() +} diff --git a/pkg/tenant/tenant.go b/pkg/util/users/tenant.go similarity index 99% rename from pkg/tenant/tenant.go rename to pkg/util/users/tenant.go index f8e97c6538b..8443c548812 100644 --- a/pkg/tenant/tenant.go +++ b/pkg/util/users/tenant.go @@ -1,4 +1,4 @@ -package tenant +package users import ( "context" diff --git a/pkg/storage/tsdb/tenant_deletion_mark.go b/pkg/util/users/tenant_deletion_mark.go similarity index 88% rename from pkg/storage/tsdb/tenant_deletion_mark.go rename to pkg/util/users/tenant_deletion_mark.go index 5e0eda2d34e..622f1e8bdb1 100644 --- a/pkg/storage/tsdb/tenant_deletion_mark.go +++ b/pkg/util/users/tenant_deletion_mark.go @@ -1,4 +1,4 @@ -package tsdb +package users import ( "bytes" @@ -7,12 +7,10 @@ import ( "path" "time" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/thanos-io/objstore" - - "github.com/cortexproject/cortex/pkg/tenant" - util_log "github.com/cortexproject/cortex/pkg/util/log" ) const TenantDeletionMarkFile = "tenant-deletion-mark.json" @@ -49,16 +47,16 @@ func WriteTenantDeletionMark(ctx context.Context, bkt objstore.InstrumentedBucke } // Returns tenant deletion mark for given user, if it exists. If it doesn't exist, returns nil mark, and no error. 
-func ReadTenantDeletionMark(ctx context.Context, bkt objstore.InstrumentedBucket, userID string) (*TenantDeletionMark, error) { +func ReadTenantDeletionMark(ctx context.Context, bkt objstore.InstrumentedBucket, userID string, logger log.Logger) (*TenantDeletionMark, error) { markerFile := GetGlobalDeletionMarkPath(userID) - if mark, err := read(ctx, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr), markerFile); err != nil { + if mark, err := read(ctx, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr), markerFile, logger); err != nil { return nil, err } else if mark != nil { return mark, nil } markerFile = GetLocalDeletionMarkPath(userID) - return read(ctx, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr), markerFile) + return read(ctx, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr), markerFile, logger) } // Deletes the tenant deletion mark for given user if it exists. @@ -77,7 +75,7 @@ func GetLocalDeletionMarkPath(userID string) string { } func GetGlobalDeletionMarkPath(userID string) string { - return path.Join(tenant.GlobalMarkersDir, userID, TenantDeletionMarkFile) + return path.Join(GlobalMarkersDir, userID, TenantDeletionMarkFile) } func exists(ctx context.Context, bkt objstore.BucketReader, markerFile string) (bool, error) { @@ -93,7 +91,7 @@ func write(ctx context.Context, bkt objstore.Bucket, markerFile string, mark *Te return errors.Wrap(bkt.Upload(ctx, markerFile, bytes.NewReader(data)), "upload tenant deletion mark") } -func read(ctx context.Context, bkt objstore.BucketReader, markerFile string) (*TenantDeletionMark, error) { +func read(ctx context.Context, bkt objstore.BucketReader, markerFile string, logger log.Logger) (*TenantDeletionMark, error) { r, err := bkt.Get(ctx, markerFile) if err != nil { if bkt.IsObjNotFoundErr(err) { @@ -108,7 +106,7 @@ func read(ctx context.Context, bkt objstore.BucketReader, markerFile string) (*T // Close reader before dealing with decode error. 
if closeErr := r.Close(); closeErr != nil { - level.Warn(util_log.Logger).Log("msg", "failed to close bucket reader", "err", closeErr) + level.Warn(logger).Log("msg", "failed to close bucket reader", "err", closeErr) } if err != nil { diff --git a/pkg/storage/tsdb/tenant_deletion_mark_test.go b/pkg/util/users/tenant_deletion_mark_test.go similarity index 99% rename from pkg/storage/tsdb/tenant_deletion_mark_test.go rename to pkg/util/users/tenant_deletion_mark_test.go index f95388be068..5da53554260 100644 --- a/pkg/storage/tsdb/tenant_deletion_mark_test.go +++ b/pkg/util/users/tenant_deletion_mark_test.go @@ -1,4 +1,4 @@ -package tsdb +package users import ( "bytes" diff --git a/pkg/tenant/tenant_test.go b/pkg/util/users/tenant_test.go similarity index 98% rename from pkg/tenant/tenant_test.go rename to pkg/util/users/tenant_test.go index e2ec65bf41a..a485e1bd427 100644 --- a/pkg/tenant/tenant_test.go +++ b/pkg/util/users/tenant_test.go @@ -1,4 +1,4 @@ -package tenant +package users import ( "strings" diff --git a/schemas/cortex-config-schema.json b/schemas/cortex-config-schema.json index 1ae848a9c45..1bd4da387e6 100644 --- a/schemas/cortex-config-schema.json +++ b/schemas/cortex-config-schema.json @@ -902,6 +902,38 @@ } }, "type": "object" + }, + "users_scanner": { + "properties": { + "cache_ttl": { + "default": "0s", + "description": "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.", + "type": "string", + "x-cli-flag": "alertmanager-storage.users-scanner.cache-ttl", + "x-format": "duration" + }, + "clean_up_interval": { + "default": "15m0s", + "description": "How frequently user index file is updated, it only takes effect when user scan strategy is user_index.", + "type": "string", + "x-cli-flag": "alertmanager-storage.users-scanner.user-index.cleanup-interval", + "x-format": "duration" + }, + "max_stale_period": { + "default": "1h0m0s", + "description": "Maximum period of time to consider the user index as stale. 
Fall back to the base scanner if stale. Only valid when strategy is user_index.", + "type": "string", + "x-cli-flag": "alertmanager-storage.users-scanner.user-index.max-stale-period", + "x-format": "duration" + }, + "strategy": { + "default": "list", + "description": "Strategy to use to scan users. Supported values are: list, user_index.", + "type": "string", + "x-cli-flag": "alertmanager-storage.users-scanner.strategy" + } + }, + "type": "object" + } }, "type": "object" @@ -3076,6 +3108,13 @@ "x-cli-flag": "blocks-storage.users-scanner.cache-ttl", "x-format": "duration" }, + "clean_up_interval": { + "default": "15m0s", + "description": "How frequently user index file is updated, it only takes effect when user scan strategy is user_index.", + "type": "string", + "x-cli-flag": "blocks-storage.users-scanner.user-index.cleanup-interval", + "x-format": "duration" + }, "max_stale_period": { "default": "1h0m0s", "description": "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.", @@ -7345,6 +7384,38 @@ } }, "type": "object" + }, + "users_scanner": { + "properties": { + "cache_ttl": { + "default": "0s", + "description": "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.", + "type": "string", + "x-cli-flag": "ruler-storage.users-scanner.cache-ttl", + "x-format": "duration" + }, + "clean_up_interval": { + "default": "15m0s", + "description": "How frequently user index file is updated, it only takes effect when user scan strategy is user_index.", + "type": "string", + "x-cli-flag": "ruler-storage.users-scanner.user-index.cleanup-interval", + "x-format": "duration" + }, + "max_stale_period": { + "default": "1h0m0s", + "description": "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. 
Only valid when strategy is user_index.", + "type": "string", + "x-cli-flag": "ruler-storage.users-scanner.user-index.max-stale-period", + "x-format": "duration" + }, + "strategy": { + "default": "list", + "description": "Strategy to use to scan users. Supported values are: list, user_index.", + "type": "string", + "x-cli-flag": "ruler-storage.users-scanner.strategy" + } + }, + "type": "object" } }, "type": "object" diff --git a/tools/thanosconvert/thanosconvert.go b/tools/thanosconvert/thanosconvert.go index 03d042cc8bc..65d31741b59 100644 --- a/tools/thanosconvert/thanosconvert.go +++ b/tools/thanosconvert/thanosconvert.go @@ -16,7 +16,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/storage/tsdb/users" + "github.com/cortexproject/cortex/pkg/util/users" ) // ThanosBlockConverter converts blocks written by Thanos to make them readable by Cortex @@ -63,7 +63,7 @@ func (c ThanosBlockConverter) Run(ctx context.Context) (Results, error) { // Hardcode user scan strategy to list. // We can safely ignore the error as it only fails if the strategy is not supported. - usersScanner, _ := users.NewScanner(cortex_tsdb.UsersScannerConfig{Strategy: cortex_tsdb.UserScanStrategyList}, c.bkt, c.logger, nil) + usersScanner, _ := users.NewScanner(users.UsersScannerConfig{Strategy: users.UserScanStrategyList}, c.bkt, c.logger, nil) // Only active users are considered. users, _, _, err := usersScanner.ScanUsers(ctx) if err != nil {