diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md
index daa628ec786..5a0eb7e05de 100644
--- a/docs/configuration/config-file-reference.md
+++ b/docs/configuration/config-file-reference.md
@@ -741,78 +741,61 @@ storage:
   # The CLI flags prefix for this block config is: ruler
   [configdb: <configstore_config>]

-  azure:
-    # Name of the blob container used to store chunks. Defaults to `cortex`.
-    # This container must be created before running cortex.
-    # CLI flag: -ruler.storage.azure.container-name
-    [container_name: <string> | default = "cortex"]
-
-    # The Microsoft Azure account name to be used
-    # CLI flag: -ruler.storage.azure.account-name
-    [account_name: <string> | default = ""]
-
-    # The Microsoft Azure account key to use.
-    # CLI flag: -ruler.storage.azure.account-key
-    [account_key: <string> | default = ""]
+  s3:
+    # S3 endpoint without schema
+    # CLI flag: -ruler.storage.s3.endpoint
+    [endpoint: <string> | default = ""]

-    # Preallocated buffer size for downloads (default is 512KB)
-    # CLI flag: -ruler.storage.azure.download-buffer-size
-    [download_buffer_size: <int> | default = 512000]
+    # S3 bucket name
+    # CLI flag: -ruler.storage.s3.bucket-name
+    [bucket_name: <string> | default = ""]

-    # Preallocated buffer size for up;oads (default is 256KB)
-    # CLI flag: -ruler.storage.azure.upload-buffer-size
-    [upload_buffer_size: <int> | default = 256000]
+    # S3 secret access key
+    # CLI flag: -ruler.storage.s3.secret-access-key
+    [secret_access_key: <string> | default = ""]

-    # Number of buffers used to used to upload a chunk. (defaults to 1)
-    # CLI flag: -ruler.storage.azure.download-buffer-count
-    [upload_buffer_count: <int> | default = 1]
+    # S3 access key ID
+    # CLI flag: -ruler.storage.s3.access-key-id
+    [access_key_id: <string> | default = ""]

-    # Timeout for requests made against azure blob storage. Defaults to 30
-    # seconds.
-    # CLI flag: -ruler.storage.azure.request-timeout
-    [request_timeout: <duration> | default = 30s]
+    # If enabled, use http:// for the S3 endpoint instead of https://. This
+    # could be useful in local dev/test environments while using an
+    # S3-compatible backend storage, like Minio.
+    # CLI flag: -ruler.storage.s3.insecure
+    [insecure: <boolean> | default = false]

-    # Number of retries for a request which times out.
-    # CLI flag: -ruler.storage.azure.max-retries
-    [max_retries: <int> | default = 5]
+  gcs:
+    # GCS bucket name
+    # CLI flag: -ruler.storage.gcs.bucket-name
+    [bucket_name: <string> | default = ""]

-    # Minimum time to wait before retrying a request.
-    # CLI flag: -ruler.storage.azure.min-retry-delay
-    [min_retry_delay: <duration> | default = 10ms]
+    # JSON representing either a Google Developers Console
+    # client_credentials.json file or a Google Developers service account key
+    # file. If empty, fallback to Google default logic.
+    # CLI flag: -ruler.storage.gcs.service-account
+    [service_account: <string> | default = ""]

-    # Maximum time to wait before retrying a request.
-    # CLI flag: -ruler.storage.azure.max-retry-delay
-    [max_retry_delay: <duration> | default = 500ms]
+  azure:
+    # Azure storage account name
+    # CLI flag: -ruler.storage.azure.account-name
+    [account_name: <string> | default = ""]

-  gcs:
-    # Name of GCS bucket to put chunks in.
-    # CLI flag: -ruler.storage.gcs.bucketname
-    [bucket_name: <string> | default = ""]
+    # Azure storage account key
+    # CLI flag: -ruler.storage.azure.account-key
+    [account_key: <string> | default = ""]

-    # The size of the buffer that GCS client for each PUT request. 0 to disable
-    # buffering.
-    # CLI flag: -ruler.storage.gcs.chunk-buffer-size
-    [chunk_buffer_size: <int> | default = 0]
+    # Azure storage container name
+    # CLI flag: -ruler.storage.azure.container-name
+    [container_name: <string> | default = ""]

-    # The duration after which the requests to GCS should be timed out.
-    # CLI flag: -ruler.storage.gcs.request-timeout
-    [request_timeout: <duration> | default = 0s]
+    # Azure storage endpoint suffix without schema. The account name will be
+    # prefixed to this value to create the FQDN
+    # CLI flag: -ruler.storage.azure.endpoint-suffix
+    [endpoint_suffix: <string> | default = ""]

-  s3:
-    # S3 endpoint URL with escaped Key and Secret encoded. If only region is
-    # specified as a host, proper endpoint will be deduced. Use
-    # inmemory:///<bucket-name> to use a mock in-memory implementation.
-    # CLI flag: -ruler.storage.s3.url
-    [s3: <url> | default = ]
-
-    # Comma separated list of bucket names to evenly distribute chunks over.
-    # Overrides any buckets specified in s3.url flag
-    # CLI flag: -ruler.storage.s3.buckets
-    [bucketnames: <string> | default = ""]
-
-    # Set this to `true` to force the request to use path-style addressing.
-    # CLI flag: -ruler.storage.s3.force-path-style
-    [s3forcepathstyle: <boolean> | default = false]
+    # Number of retries for recoverable errors
+    # CLI flag: -ruler.storage.azure.max-retries
+    [max_retries: <int> | default = 20]

 # file path to store temporary rule files for the prometheus rule managers
 # CLI flag: -ruler.rule-path
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index 60b9fbde0e7..05404b53bd3 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -159,7 +159,7 @@ func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable
 		return nil, err
 	}

-	ruleStore, err := NewRuleStorage(cfg.StoreConfig)
+	ruleStore, err := NewRuleStorage(cfg.StoreConfig, logger)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/ruler/rules/objectclient/rule_store.go b/pkg/ruler/rules/objectclient/rule_store.go
index f03a3d5dbf7..a312c4d0a08 100644
--- a/pkg/ruler/rules/objectclient/rule_store.go
+++ b/pkg/ruler/rules/objectclient/rule_store.go
@@ -8,8 +8,8 @@ import (
 	"github.com/go-kit/kit/log/level"
 	proto "github.com/gogo/protobuf/proto"
+	"github.com/thanos-io/thanos/pkg/objstore"

-	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/ruler/rules"
 	"github.com/cortexproject/cortex/pkg/util"
 )
@@ -25,19 +25,20 @@ const (

 // RuleStore allows cortex rules to be stored using an object store backend.
 type RuleStore struct {
-	client chunk.ObjectClient
+	bucket objstore.Bucket
 }

 // NewRuleStore returns a new RuleStore
-func NewRuleStore(client chunk.ObjectClient) *RuleStore {
+func NewRuleStore(bucket objstore.Bucket) *RuleStore {
 	return &RuleStore{
-		client: client,
+		bucket: bucket,
 	}
 }

 func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string) (*rules.RuleGroupDesc, error) {
-	reader, err := o.client.GetObject(ctx, objectKey)
-	if err == chunk.ErrStorageObjectNotFound {
+
+	reader, err := o.bucket.Get(ctx, objectKey)
+	if o.bucket.IsObjNotFoundErr(err) {
 		level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", objectKey)
 		return nil, rules.ErrGroupNotFound
 	}
@@ -64,28 +65,28 @@ func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string) (*rules.

 // ListAllRuleGroups returns all the active rule groups
 func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rules.RuleGroupList, error) {
-	ruleGroupObjects, err := o.client.List(ctx, generateRuleObjectKey("", "", ""))
-	if err != nil {
-		return nil, err
-	}
-
 	userGroupMap := map[string]rules.RuleGroupList{}
-	for _, obj := range ruleGroupObjects {
-
-		user := decomposeRuleObjectKey(obj.Key)
+
+	err := o.bucket.Iter(ctx, generateRuleObjectKey("", "", ""), func(s string) error {
+		user := decomposeRuleObjectKey(s)
 		if user == "" {
-			continue
+			return nil
 		}

-		rg, err := o.getRuleGroup(ctx, obj.Key)
+		rg, err := o.getRuleGroup(ctx, s)
 		if err != nil {
-			return nil, err
+			return err
 		}

 		if _, exists := userGroupMap[user]; !exists {
 			userGroupMap[user] = rules.RuleGroupList{}
 		}
 		userGroupMap[user] = append(userGroupMap[user], rg)
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
 	}

 	return userGroupMap, nil
@@ -93,22 +94,25 @@ func (o *RuleStore) ListAllRuleGroups(ctx context.Context) (map[string]rules.Rul

 // ListRuleGroups returns all the active rule groups for a user
 func (o *RuleStore) ListRuleGroups(ctx context.Context, userID, namespace string) (rules.RuleGroupList, error) {
-	ruleGroupObjects, err := o.client.List(ctx, generateRuleObjectKey(userID, namespace, ""))
-	if err != nil {
-		return nil, err
-	}
-
 	groups := []*rules.RuleGroupDesc{}
-	for _, obj := range ruleGroupObjects {
-		level.Debug(util.Logger).Log("msg", "listing rule group", "key", obj.Key)
-		rg, err := o.getRuleGroup(ctx, obj.Key)
+
+	err := o.bucket.Iter(ctx, generateRuleObjectKey(userID, namespace, ""), func(s string) error {
+		level.Debug(util.Logger).Log("msg", "listing rule group", "key", s)
+
+		rg, err := o.getRuleGroup(ctx, s)
 		if err != nil {
-			level.Error(util.Logger).Log("msg", "unable to retrieve rule group", "err", err, "key", obj.Key)
-			return nil, err
+			level.Error(util.Logger).Log("msg", "unable to retrieve rule group", "err", err, "key", s)
+			return err
 		}
 		groups = append(groups, rg)
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
 	}
+
 	return groups, nil
 }
@@ -131,14 +135,14 @@ func (o *RuleStore) SetRuleGroup(ctx context.Context, userID string, namespace s
 	}

 	objectKey := generateRuleObjectKey(userID, namespace, group.Name)
-	return o.client.PutObject(ctx, objectKey, bytes.NewReader(data))
+	return o.bucket.Upload(ctx, objectKey, bytes.NewReader(data))
 }

 // DeleteRuleGroup deletes the specified rule group
 func (o *RuleStore) DeleteRuleGroup(ctx context.Context, userID string, namespace string, groupName string) error {
 	objectKey := generateRuleObjectKey(userID, namespace, groupName)
-	err := o.client.DeleteObject(ctx, objectKey)
-	if err == chunk.ErrStorageObjectNotFound {
+	err := o.bucket.Delete(ctx, objectKey)
+	if o.bucket.IsObjNotFoundErr(err) {
 		return rules.ErrGroupNotFound
 	}
 	return err
diff --git a/pkg/ruler/storage.go b/pkg/ruler/storage.go
index 6bface18117..ce1518789eb 100644
--- a/pkg/ruler/storage.go
+++ b/pkg/ruler/storage.go
@@ -5,13 +5,15 @@ import (
 	"flag"
 	"fmt"

-	"github.com/cortexproject/cortex/pkg/chunk"
-	"github.com/cortexproject/cortex/pkg/chunk/aws"
-	"github.com/cortexproject/cortex/pkg/chunk/azure"
-	"github.com/cortexproject/cortex/pkg/chunk/gcp"
+	"github.com/go-kit/kit/log"
+	"github.com/thanos-io/thanos/pkg/objstore"
+
 	"github.com/cortexproject/cortex/pkg/configs/client"
 	"github.com/cortexproject/cortex/pkg/ruler/rules"
"github.com/cortexproject/cortex/pkg/ruler/rules/objectclient" + "github.com/cortexproject/cortex/pkg/storage/backend/azure" + "github.com/cortexproject/cortex/pkg/storage/backend/gcs" + "github.com/cortexproject/cortex/pkg/storage/backend/s3" ) // RuleStoreConfig conigures a rule store @@ -20,9 +22,9 @@ type RuleStoreConfig struct { ConfigDB client.Config `yaml:"configdb"` // Object Storage Configs - Azure azure.BlobStorageConfig `yaml:"azure"` - GCS gcp.GCSConfig `yaml:"gcs"` - S3 aws.S3Config `yaml:"s3"` + S3 s3.Config `yaml:"s3"` + GCS gcs.Config `yaml:"gcs"` + Azure azure.Config `yaml:"azure"` mock rules.RuleStore `yaml:"-"` } @@ -37,7 +39,7 @@ func (cfg *RuleStoreConfig) RegisterFlags(f *flag.FlagSet) { } // NewRuleStorage returns a new rule storage backend poller and store -func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) { +func NewRuleStorage(cfg RuleStoreConfig, logger log.Logger) (rules.RuleStore, error) { if cfg.mock != nil { return cfg.mock, nil } @@ -52,17 +54,17 @@ func NewRuleStorage(cfg RuleStoreConfig) (rules.RuleStore, error) { return rules.NewConfigRuleStore(c), nil case "azure": - return newObjRuleStore(azure.NewBlobStorage(&cfg.Azure, "")) + return newObjRuleStore(azure.NewBucketClient(cfg.Azure, "cortex-ruler", logger)) case "gcs": - return newObjRuleStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCS, "")) + return newObjRuleStore(gcs.NewBucketClient(context.Background(), cfg.GCS, "cortex-ruler", logger)) case "s3": - return newObjRuleStore(aws.NewS3ObjectClient(cfg.S3, "")) + return newObjRuleStore(s3.NewBucketClient(cfg.S3, "cortex-ruler", logger)) default: return nil, fmt.Errorf("Unrecognized rule storage mode %v, choose one of: configdb, gcs", cfg.Type) } } -func newObjRuleStore(client chunk.ObjectClient, err error) (rules.RuleStore, error) { +func newObjRuleStore(client objstore.Bucket, err error) (rules.RuleStore, error) { if err != nil { return nil, err } diff --git a/pkg/storage/backend/azure/config.go b/pkg/storage/backend/azure/config.go index d8748fdc765..4e2ec033657 100644 --- a/pkg/storage/backend/azure/config.go +++ b/pkg/storage/backend/azure/config.go @@ -15,11 +15,16 @@ type Config struct { MaxRetries int `yaml:"max_retries"` } -// RegisterFlags registers the flags for TSDB Azure storage +// RegisterFlags registers the flags for TSDB Azure storage with the TSDB prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.StorageAccountName, "experimental.tsdb.azure.account-name", "", "Azure storage account name") - f.Var(&cfg.StorageAccountKey, "experimental.tsdb.azure.account-key", "Azure storage account key") - f.StringVar(&cfg.ContainerName, "experimental.tsdb.azure.container-name", "", "Azure storage container name") - f.StringVar(&cfg.Endpoint, "experimental.tsdb.azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. 
The account name will be prefixed to this value to create the FQDN") - f.IntVar(&cfg.MaxRetries, "experimental.tsdb.azure.max-retries", 20, "Number of retries for recoverable errors") + cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) +} + +// RegisterFlags registers the flags for Azure storage with the specified prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.StorageAccountName, prefix+"azure.account-name", "", "Azure storage account name") + f.Var(&cfg.StorageAccountKey, prefix+"azure.account-key", "Azure storage account key") + f.StringVar(&cfg.ContainerName, prefix+"azure.container-name", "", "Azure storage container name") + f.StringVar(&cfg.Endpoint, prefix+"azure.endpoint-suffix", "", "Azure storage endpoint suffix without schema. The account name will be prefixed to this value to create the FQDN") + f.IntVar(&cfg.MaxRetries, prefix+"azure.max-retries", 20, "Number of retries for recoverable errors") } diff --git a/pkg/storage/backend/filesystem/config.go b/pkg/storage/backend/filesystem/config.go index 03a4f85f5b7..b7aa74674ca 100644 --- a/pkg/storage/backend/filesystem/config.go +++ b/pkg/storage/backend/filesystem/config.go @@ -9,5 +9,10 @@ type Config struct { // RegisterFlags registers the flags for TSDB filesystem storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Directory, "experimental.tsdb.filesystem.dir", "", "Local filesystem storage directory.") + cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) +} + +// RegisterFlags registers the flags for filesystem storage with the specified prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Directory, prefix+"filesystem.dir", "", "Local filesystem storage directory.") } diff --git a/pkg/storage/backend/gcs/config.go b/pkg/storage/backend/gcs/config.go index 0363b9579b6..f52da4c7fc1 100644 --- a/pkg/storage/backend/gcs/config.go +++ b/pkg/storage/backend/gcs/config.go @@ -12,8 +12,13 @@ type Config struct { ServiceAccount flagext.Secret `yaml:"service_account"` } -// RegisterFlags registers the flags for TSDB GCS storage +// RegisterFlags registers the flags for TSDB GCS storage with the TSDB prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.BucketName, "experimental.tsdb.gcs.bucket-name", "", "GCS bucket name") - f.Var(&cfg.ServiceAccount, "experimental.tsdb.gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. If empty, fallback to Google default logic.") + cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) +} + +// RegisterFlags registers the flags for GCS storage with the specified prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.BucketName, prefix+"gcs.bucket-name", "", "GCS bucket name") + f.Var(&cfg.ServiceAccount, prefix+"gcs.service-account", "JSON representing either a Google Developers Console client_credentials.json file or a Google Developers service account key file. 
If empty, fallback to Google default logic.") } diff --git a/pkg/storage/backend/s3/config.go b/pkg/storage/backend/s3/config.go index 60e52d9f9b7..cc5b7f96ea4 100644 --- a/pkg/storage/backend/s3/config.go +++ b/pkg/storage/backend/s3/config.go @@ -17,9 +17,14 @@ type Config struct { // RegisterFlags registers the flags for TSDB s3 storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.AccessKeyID, "experimental.tsdb.s3.access-key-id", "", "S3 access key ID") - f.Var(&cfg.SecretAccessKey, "experimental.tsdb.s3.secret-access-key", "S3 secret access key") - f.StringVar(&cfg.BucketName, "experimental.tsdb.s3.bucket-name", "", "S3 bucket name") - f.StringVar(&cfg.Endpoint, "experimental.tsdb.s3.endpoint", "", "S3 endpoint without schema") - f.BoolVar(&cfg.Insecure, "experimental.tsdb.s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") + cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) +} + +// RegisterFlags registers the flags for s3 storage with the specified prefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "S3 access key ID") + f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "S3 secret access key") + f.StringVar(&cfg.BucketName, prefix+"s3.bucket-name", "", "S3 bucket name") + f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "S3 endpoint without schema") + f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.") }
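
Below the patch, a minimal sketch of how the new `RegisterFlagsWithPrefix` pattern is meant to be consumed. The `"ruler.storage."` prefix is an assumption inferred from the CLI flags documented above (`-ruler.storage.s3.*`); the actual wiring lives in `RuleStoreConfig.RegisterFlags`, whose body is not shown in this diff:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/storage/backend/s3"
)

func main() {
	// The same backend Config can now be registered under any flag prefix:
	// RegisterFlags keeps the existing "experimental.tsdb." flags for the
	// TSDB code path, while other components (here: the ruler, with an
	// assumed "ruler.storage." prefix matching the docs above) reuse the
	// same struct, yielding -ruler.storage.s3.bucket-name and friends.
	var cfg s3.Config
	cfg.RegisterFlagsWithPrefix("ruler.storage.", flag.CommandLine)

	flag.Parse()
	fmt.Println("S3 bucket:", cfg.BucketName)
}
```

This keeps the experimental TSDB flag names stable while letting `RuleStoreConfig` embed `s3.Config`, `gcs.Config`, and `azure.Config` directly, as the `pkg/ruler/storage.go` hunks above show.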