
Commit

Saner and consistent YAML fields in config (grafana#2273)
* Saner YAML fields in config

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>

* Address feedback

Signed-off-by: Goutham Veeramachaneni <gouthamve@gmail.com>
gouthamve authored Mar 26, 2020
1 parent 5d22b3a commit 00249e9
Showing 4 changed files with 23 additions and 23 deletions.
22 changes: 11 additions & 11 deletions aws/dynamodb_storage_client.go
@@ -99,13 +99,13 @@ func init() {

// DynamoDBConfig specifies config for a DynamoDB database.
type DynamoDBConfig struct {
- DynamoDB flagext.URLValue
- APILimit float64
- ThrottleLimit float64
- ApplicationAutoScaling flagext.URLValue
- Metrics MetricsAutoScalingConfig
- ChunkGangSize int
- ChunkGetMaxParallelism int
+ DynamoDB flagext.URLValue `yaml:"dynamodb_url"`
+ APILimit float64 `yaml:"api_limit"`
+ ThrottleLimit float64 `yaml:"throttle_limit"`
+ ApplicationAutoScaling flagext.URLValue `yaml:"application_autoscaling_url"`
+ Metrics MetricsAutoScalingConfig `yaml:"metrics"`
+ ChunkGangSize int `yaml:"chunk_gang_size"`
+ ChunkGetMaxParallelism int `yaml:"chunk_get_max_parallelism"`
backoffConfig util.BackoffConfig
}

@@ -116,8 +116,8 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) {
f.Float64Var(&cfg.APILimit, "dynamodb.api-limit", 2.0, "DynamoDB table management requests per second limit.")
f.Float64Var(&cfg.ThrottleLimit, "dynamodb.throttle-limit", 10.0, "DynamoDB rate cap to back off when throttled.")
f.Var(&cfg.ApplicationAutoScaling, "applicationautoscaling.url", "ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded.")
- f.IntVar(&cfg.ChunkGangSize, "dynamodb.chunk.gang.size", 10, "Number of chunks to group together to parallelise fetches (zero to disable)")
- f.IntVar(&cfg.ChunkGetMaxParallelism, "dynamodb.chunk.get.max.parallelism", 32, "Max number of chunk-get operations to start in parallel")
+ f.IntVar(&cfg.ChunkGangSize, "dynamodb.chunk-gang-size", 10, "Number of chunks to group together to parallelise fetches (zero to disable)")
+ f.IntVar(&cfg.ChunkGetMaxParallelism, "dynamodb.chunk.get-max-parallelism", 32, "Max number of chunk-get operations to start in parallel")
f.DurationVar(&cfg.backoffConfig.MinBackoff, "dynamodb.min-backoff", 100*time.Millisecond, "Minimum backoff time")
f.DurationVar(&cfg.backoffConfig.MaxBackoff, "dynamodb.max-backoff", 50*time.Second, "Maximum backoff time")
f.IntVar(&cfg.backoffConfig.MaxRetries, "dynamodb.max-retries", 20, "Maximum number of times to retry an operation")
@@ -126,8 +126,8 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) {

// StorageConfig specifies config for storing data on AWS.
type StorageConfig struct {
- DynamoDBConfig
- S3Config `yaml:",inline"`
+ DynamoDBConfig `yaml:"dynamodb"`
+ S3Config `yaml:",inline"`
}

// RegisterFlags adds the flags required to config this to the given FlagSet
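
With both hunks applied, the DynamoDB settings hang off an explicit dynamodb key inside the AWS storage config and use the new snake_case names. A rough sketch of the resulting YAML fragment (key names come straight from the tags above, the numeric defaults mirror the flag defaults in RegisterFlags, while the URL value and the exact position of this block in the wider Cortex config are assumptions):

dynamodb:
  dynamodb_url: dynamodb://us-east-1      # assumed example value
  api_limit: 2.0                          # matches the dynamodb.api-limit flag default
  throttle_limit: 10.0                    # matches the dynamodb.throttle-limit flag default
  chunk_gang_size: 10
  chunk_get_max_parallelism: 32
  metrics:
    # MetricsAutoScalingConfig fields; see aws/metrics_autoscaling.go below
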
18 changes: 9 additions & 9 deletions aws/metrics_autoscaling.go
@@ -41,15 +41,15 @@ const (

// MetricsAutoScalingConfig holds parameters to configure how it works
type MetricsAutoScalingConfig struct {
- URL string // URL to contact Prometheus store on
- TargetQueueLen int64 // Queue length above which we will scale up capacity
- ScaleUpFactor float64 // Scale up capacity by this multiple
- MinThrottling float64 // Ignore throttling below this level
- QueueLengthQuery string // Promql query to fetch ingester queue length
- ThrottleQuery string // Promql query to fetch throttle rate per table
- UsageQuery string // Promql query to fetch write capacity usage per table
- ReadUsageQuery string // Promql query to fetch read usage per table
- ReadErrorQuery string // Promql query to fetch read errors per table
+ URL string `yaml:"url"` // URL to contact Prometheus store on
+ TargetQueueLen int64 `yaml:"target_queue_length"` // Queue length above which we will scale up capacity
+ ScaleUpFactor float64 `yaml:"scale_up_factor"` // Scale up capacity by this multiple
+ MinThrottling float64 `yaml:"ignore_throttle_below"` // Ignore throttling below this level
+ QueueLengthQuery string `yaml:"queue_length_query"` // Promql query to fetch ingester queue length
+ ThrottleQuery string `yaml:"write_throttle_query"` // Promql query to fetch throttle rate per table
+ UsageQuery string `yaml:"write_usage_query"` // Promql query to fetch write capacity usage per table
+ ReadUsageQuery string `yaml:"read_usage_query"` // Promql query to fetch read usage per table
+ ReadErrorQuery string `yaml:"read_error_query"` // Promql query to fetch read errors per table

deprecatedErrorRateQuery string
}
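
The metrics block referenced from DynamoDBConfig above then accepts fields like the following; the key names are taken from the new tags, while the address, numbers, and PromQL expressions are placeholders rather than values from this commit:

metrics:
  url: http://prometheus.example:9090   # placeholder Prometheus address
  target_queue_length: 100000           # placeholder value
  scale_up_factor: 1.3                  # placeholder value
  ignore_throttle_below: 1              # placeholder value
  queue_length_query: ""                # PromQL, supplied by the operator
  write_throttle_query: ""
  write_usage_query: ""
  read_usage_query: ""
  read_error_query: ""
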
4 changes: 2 additions & 2 deletions gcp/bigtable_index_client.go
@@ -40,8 +40,8 @@ type Config struct {
ColumnKey bool `yaml:"-"`
DistributeKeys bool `yaml:"-"`

- TableCacheEnabled bool
- TableCacheExpiration time.Duration
+ TableCacheEnabled bool `yaml:"table_cache_enabled"`
+ TableCacheExpiration time.Duration `yaml:"table_cache_expiration"`
}

// RegisterFlags adds the flags required to config this to the given FlagSet
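
For the Bigtable index client the two newly tagged fields surface in YAML as sketched below; the enclosing key for this Config block is not part of this diff, so only the field names are taken from it and the values are placeholders:

table_cache_enabled: true         # placeholder value
table_cache_expiration: 30m       # placeholder duration
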
2 changes: 1 addition & 1 deletion storage/factory.go
@@ -59,7 +59,7 @@ type Config struct {
BoltDBConfig local.BoltDBConfig `yaml:"boltdb"`
FSConfig local.FSConfig `yaml:"filesystem"`

- IndexCacheValidity time.Duration
+ IndexCacheValidity time.Duration `yaml:"index_cache_validity"`

IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"`

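
In the chunk-storage Config the previously untagged IndexCacheValidity likewise gains an explicit key, sitting at the same level as the boltdb, filesystem, and index_queries_cache_config keys already shown above; the duration below is a placeholder:

index_cache_validity: 5m          # key name from the new tag; value is a placeholder
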
