diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index dd2733123737..906206e3025a 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -137,14 +137,14 @@ local promtail_win() = pipeline('promtail-windows') { steps: [ { name: 'identify-runner', - image: 'golang:windowsservercore-1809', + image: 'golang:1.19-windowsservercore-1809', commands: [ 'Write-Output $env:DRONE_RUNNER_NAME', ], }, { name: 'test', - image: 'golang:windowsservercore-1809', + image: 'golang:1.19-windowsservercore-1809', commands: [ 'go test .\\clients\\pkg\\promtail\\targets\\windows\\... -v', ], diff --git a/.drone/drone.yml b/.drone/drone.yml index bef90358861c..8a4112706fcd 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -1279,11 +1279,11 @@ platform: steps: - commands: - Write-Output $env:DRONE_RUNNER_NAME - image: golang:windowsservercore-1809 + image: golang:1.19-windowsservercore-1809 name: identify-runner - commands: - go test .\clients\pkg\promtail\targets\windows\... -v - image: golang:windowsservercore-1809 + image: golang:1.19-windowsservercore-1809 name: test trigger: ref: @@ -1675,6 +1675,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: fbc93106cac3a989c4b910682f2e08eadc21786d8502f3270bc2f4a5b1996b21 +hmac: c1930caa8e7ffedf82b641cc1204d87319cddf576efa787d2ef1a86d16c2b9cf ... 
diff --git a/.github/workflows/doc-validator.yml b/.github/workflows/doc-validator.yml index 9cf99f19dae3..5300fede5c95 100644 --- a/.github/workflows/doc-validator.yml +++ b/.github/workflows/doc-validator.yml @@ -17,5 +17,6 @@ jobs: run: > doc-validator --include=$(git config --global --add safe.directory $(realpath .); printf '^docs/sources/(%s)$' "$(git --no-pager diff --name-only --diff-filter=ACMRT origin/${{ github.event.pull_request.base.ref }}...${{ github.event.pull_request.head.sha }} -- docs/sources | sed 's/^docs\/sources\///' | awk -F'\n' '{if(NR == 1) {printf $0} else {printf "|"$0}}')") + --skip-image-validation ./docs/sources /docs/loki/latest diff --git a/.github/workflows/operator-images.yaml b/.github/workflows/operator-images.yaml index 07d3a73d0162..725d70735652 100644 --- a/.github/workflows/operator-images.yaml +++ b/.github/workflows/operator-images.yaml @@ -47,7 +47,7 @@ jobs: ( IFS=$','; echo "${TAGS[*]}" ) - name: Build and publish image on quay.io - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v4 with: context: ./operator push: true @@ -85,7 +85,7 @@ jobs: ( IFS=$','; echo "${TAGS[*]}" ) - name: Build and publish image on quay.io - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v4 with: context: ./operator file: ./operator/bundle.Dockerfile @@ -124,7 +124,7 @@ jobs: ( IFS=$','; echo "${TAGS[*]}" ) - name: Build and publish image on quay.io - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v4 with: context: ./operator file: ./operator/calculator.Dockerfile diff --git a/.github/workflows/snyk.yml b/.github/workflows/snyk.yml new file mode 100644 index 000000000000..1c4c8a361678 --- /dev/null +++ b/.github/workflows/snyk.yml @@ -0,0 +1,15 @@ +name: Snyk Monitor Scanning +on: + release: + types: [published] + push: + branches: + - 'main' + - 'master' + workflow_dispatch: + +jobs: + snyk-scan-ci: + uses: 'grafana/security-github-actions/.github/workflows/snyk_monitor.yml@main' + 
secrets: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 46c7aaf7e6b5..1877d5bcc45c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,6 @@ * [7925](https://github.com/grafana/loki/pull/7925) **sandeepsukhani**: Fix bugs in logs results caching causing query-frontend to return logs outside of query window. * [8120](https://github.com/grafana/loki/pull/8120) **ashwanthgoli** fix panic on hitting /scheduler/ring when ring is disabled. * [8251](https://github.com/grafana/loki/pull/8251) **sandeepsukhani** index-store: fix indexing of chunks overlapping multiple schemas. -* [8120](https://github.com/grafana/loki/pull/8232) **TaehyunHwang** Fix version info issue that shows wrong version. * [8151](https://github.com/grafana/loki/pull/8151) **sandeepsukhani** fix log deletion with line filters. ##### Changes @@ -66,6 +65,12 @@ ##### Changes +#### LogCLI + +##### Enhancement + +* [8413](https://github.com/grafana/loki/pull/8413) **chaudum**: Try to load tenant-specific `schemaconfig-{orgID}.yaml` when using `--remote-schema` argument and fallback to global `schemaconfig.yaml`. + #### Fluent Bit #### Loki Canary @@ -77,6 +82,10 @@ #### Jsonnet * [7923](https://github.com/grafana/loki/pull/7923) **manohar-koukuntla**: Add zone aware ingesters in jsonnet deployment +##### Fixes + +* [8247](https://github.com/grafana/loki/pull/8247) **Whyeasy** fix usage of cluster label within Mixin. + #### Build * [7938](https://github.com/grafana/loki/pull/7938) **ssncferreira**: Add DroneCI pipeline step to validate configuration flags documentation generation. @@ -85,6 +94,18 @@ ### Dependencies +## 2.7.3 (2023-02-01) + +#### Loki + +##### Fixes + +* [8340](https://github.com/grafana/loki/pull/8340) **MasslessParticle** Fix bug in compactor that caused panics when `startTime` and `endTime` of a delete request are equal. 
+ +#### Build + +* [8232](https://github.com/grafana/loki/pull/8232) **TaehyunHwang** Fix build issue that caused `--version` to show wrong version for Loki and Promtail binaries. + ## 2.7.2 (2023-01-25) #### Loki @@ -185,6 +206,7 @@ Check the history of the branch `release-2.7.x`. * [5400](https://github.com/grafana/loki/pull/5400) **BenoitKnecht**: promtail/server: Disable profiling by default #### Promtail +* [7470](https://github.com/grafana/loki/pull/7470) **Jack-King**: Add configuration for adding custom HTTP headers to push requests ##### Enhancements * [7593](https://github.com/grafana/loki/pull/7593) **chodges15**: Promtail: Add tenant label to client drop metrics and logs @@ -285,6 +307,7 @@ Here is the list with the changes that were produced since the previous release. ##### Fixes * [6034](https://github.com/grafana/loki/pull/6034) **DylanGuedes**: Promtail: Fix symlink tailing behavior. ##### Changes +* [6371](https://github.com/grafana/loki/pull/6371) **witalisoft**: BREAKING: Support more complex match based on multiple extracted data fields in drop stage * [5686](https://github.com/grafana/loki/pull/5686) **ssncferreira**: Move promtail StreamLagLabels config to upper level config.Config * [5839](https://github.com/grafana/loki/pull/5839) **marctc**: Add ActiveTargets method to promtail * [5661](https://github.com/grafana/loki/pull/5661) **masslessparticle**: Invalidate caches on deletes diff --git a/clients/cmd/docker-driver/config_test.go b/clients/cmd/docker-driver/config_test.go index b20d40ee2347..a3920778b622 100644 --- a/clients/cmd/docker-driver/config_test.go +++ b/clients/cmd/docker-driver/config_test.go @@ -81,7 +81,7 @@ var pipeline = PipelineConfig{ } func Test_parsePipeline(t *testing.T) { - f, err := os.CreateTemp("/tmp", "Test_parsePipeline") + f, err := os.CreateTemp("", "Test_parsePipeline") if err != nil { t.Fatal(err) } diff --git a/clients/cmd/fluent-bit/dque.go b/clients/cmd/fluent-bit/dque.go index ad4ec2318ec5..f7091de893f5 
100644 --- a/clients/cmd/fluent-bit/dque.go +++ b/clients/cmd/fluent-bit/dque.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "path/filepath" "sync" "time" @@ -25,7 +26,7 @@ type dqueConfig struct { } var defaultDqueConfig = dqueConfig{ - queueDir: "/tmp/flb-storage/loki", + queueDir: filepath.Join(os.TempDir(), "flb-storage/loki"), queueSegmentSize: 500, queueSync: false, queueName: "dque", diff --git a/clients/cmd/fluentd/Dockerfile b/clients/cmd/fluentd/Dockerfile index 2e0f9e902367..caa411889fea 100644 --- a/clients/cmd/fluentd/Dockerfile +++ b/clients/cmd/fluentd/Dockerfile @@ -1,4 +1,4 @@ -FROM ruby:2.7.5 as build +FROM ruby:3.2.0 as build ENV DEBIAN_FRONTEND=noninteractive @@ -9,7 +9,7 @@ COPY . /src/loki WORKDIR /src/loki RUN make BUILD_IN_CONTAINER=false fluentd-plugin -FROM fluent/fluentd:v1.9.2-debian-1.0 +FROM fluent/fluentd:v1.14.0-debian-1.0 ENV LOKI_URL "https://logs-prod-us-central1.grafana.net" COPY --from=build /src/loki/clients/cmd/fluentd/lib/fluent/plugin/out_loki.rb /fluentd/plugins/out_loki.rb diff --git a/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec b/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec index 1c7d7cab9179..433a1a53a1bd 100644 --- a/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec +++ b/clients/cmd/fluentd/fluent-plugin-grafana-loki.gemspec @@ -13,7 +13,7 @@ Gem::Specification.new do |spec| spec.homepage = 'https://github.com/grafana/loki/' spec.license = 'Apache-2.0' - spec.required_ruby_version = '~> 2.7' + spec.required_ruby_version = '>= 2.7' # test_files, files = `git ls-files -z`.split("\x0").partition do |f| # f.match(%r{^(test|spec|features)/}) diff --git a/clients/cmd/promtail/main.go b/clients/cmd/promtail/main.go index dec85b76d9fe..5d20460628c8 100644 --- a/clients/cmd/promtail/main.go +++ b/clients/cmd/promtail/main.go @@ -6,6 +6,7 @@ import ( "os" "reflect" "sync" + // embed time zone data _ "time/tzdata" diff --git a/clients/pkg/logentry/stages/drop.go 
b/clients/pkg/logentry/stages/drop.go index ebab27ba1aca..f3b8c4ded530 100644 --- a/clients/pkg/logentry/stages/drop.go +++ b/clients/pkg/logentry/stages/drop.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" "regexp" + "strings" "time" "github.com/go-kit/log" @@ -16,22 +17,27 @@ import ( ) const ( - ErrDropStageEmptyConfig = "drop stage config must contain at least one of `source`, `expression`, `older_than` or `longer_than`" - ErrDropStageInvalidDuration = "drop stage invalid duration, %v cannot be converted to a duration: %v" - ErrDropStageInvalidConfig = "drop stage config error, `value` and `expression` cannot both be defined at the same time." - ErrDropStageInvalidRegex = "drop stage regex compilation error: %v" - ErrDropStageInvalidByteSize = "drop stage failed to parse longer_than to bytes: %v" + ErrDropStageEmptyConfig = "drop stage config must contain at least one of `source`, `expression`, `older_than` or `longer_than`" + ErrDropStageInvalidDuration = "drop stage invalid duration, %v cannot be converted to a duration: %v" + ErrDropStageInvalidConfig = "drop stage config error, `value` and `expression` cannot both be defined at the same time." 
+ ErrDropStageInvalidRegex = "drop stage regex compilation error: %v" + ErrDropStageInvalidByteSize = "drop stage failed to parse longer_than to bytes: %v" + ErrDropStageInvalidSource = "drop stage source invalid type should be string or list of strings" + ErrDropStageNoSourceWithValue = "drop stage config must contain `source` if `value` is specified" ) var ( defaultDropReason = "drop_stage" + defaultSeparator = ";" ) // DropConfig contains the configuration for a dropStage type DropConfig struct { - DropReason *string `mapstructure:"drop_counter_reason"` - Source *string `mapstructure:"source"` + DropReason *string `mapstructure:"drop_counter_reason"` + Source interface{} `mapstructure:"source"` + source *[]string Value *string `mapstructure:"value"` + Separator *string `mapstructure:"separator"` Expression *string `mapstructure:"expression"` regex *regexp.Regexp OlderThan *string `mapstructure:"older_than"` @@ -46,9 +52,19 @@ func validateDropConfig(cfg *DropConfig) error { (cfg.Source == nil && cfg.Expression == nil && cfg.OlderThan == nil && cfg.LongerThan == nil) { return errors.New(ErrDropStageEmptyConfig) } + if cfg.Source != nil { + src, err := unifySourceField(cfg.Source) + if err != nil { + return err + } + cfg.source = &src + } if cfg.DropReason == nil || *cfg.DropReason == "" { cfg.DropReason = &defaultDropReason } + if cfg.Separator == nil { + cfg.Separator = &defaultSeparator + } if cfg.OlderThan != nil { dur, err := time.ParseDuration(*cfg.OlderThan) if err != nil { @@ -56,6 +72,9 @@ func validateDropConfig(cfg *DropConfig) error { } cfg.olderThan = dur } + if cfg.Value != nil && cfg.Source == nil { + return errors.New(ErrDropStageNoSourceWithValue) + } if cfg.Value != nil && cfg.Expression != nil { return errors.New(ErrDropStageInvalidConfig) } @@ -66,6 +85,15 @@ func validateDropConfig(cfg *DropConfig) error { } cfg.regex = expr } + // The first step to exclude `value` and fully replace it with the `expression`. 
+ // It will simplify code and less confusing for the end-user on which option to choose. + if cfg.Value != nil { + expr, err := regexp.Compile(fmt.Sprintf("^%s$", regexp.QuoteMeta(*cfg.Value))) + if err != nil { + return errors.Errorf(ErrDropStageInvalidRegex, err) + } + cfg.regex = expr + } if cfg.LongerThan != nil { err := cfg.longerThan.Set(*cfg.LongerThan) if err != nil { @@ -75,6 +103,17 @@ func validateDropConfig(cfg *DropConfig) error { return nil } +// unifySourceField unify Source into a slice of strings +func unifySourceField(s interface{}) ([]string, error) { + switch s := s.(type) { + case []string: + return s, nil + case string: + return []string{s}, nil + } + return nil, errors.New(ErrDropStageInvalidSource) +} + // newDropStage creates a DropStage from config func newDropStage(logger log.Logger, config interface{}, registerer prometheus.Registerer) (Stage, error) { cfg := &DropConfig{} @@ -150,34 +189,17 @@ func (m *dropStage) shouldDrop(e Entry) bool { return false } } - - if m.cfg.Source != nil && m.cfg.Expression == nil { - if v, ok := e.Extracted[*m.cfg.Source]; ok { - if m.cfg.Value == nil { - // Found in map, no value set meaning drop if found in map - if Debug { - level.Debug(m.logger).Log("msg", "line met drop criteria for finding source key in extracted map") - } - } else { - s, err := getString(v) - if err != nil { - if Debug { - level.Debug(m.logger).Log("msg", "line will not be dropped, failed to convert extracted map value to string", "err", err, "type", reflect.TypeOf(v)) - } - return false - } - if *m.cfg.Value == s { - // Found in map with value set for drop - if Debug { - level.Debug(m.logger).Log("msg", "line met drop criteria for finding source key in extracted map with value matching desired drop value") - } - } else { - // Value doesn't match, don't drop - if Debug { - level.Debug(m.logger).Log("msg", fmt.Sprintf("line will not be dropped, source key was found in extracted map but value '%v' did not match desired value '%v'", s, 
*m.cfg.Value)) - } - return false - } + if m.cfg.Source != nil && m.cfg.regex == nil { + var match bool + match = true + for _, src := range *m.cfg.source { + if _, ok := e.Extracted[src]; !ok { + match = false + } + } + if match { + if Debug { + level.Debug(m.logger).Log("msg", "line met drop criteria for finding source key in extracted map") } } else { // Not found in extact map, don't drop @@ -188,48 +210,42 @@ func (m *dropStage) shouldDrop(e Entry) bool { } } - if m.cfg.Expression != nil { - if m.cfg.Source != nil { - if v, ok := e.Extracted[*m.cfg.Source]; ok { - s, err := getString(v) + if m.cfg.Source == nil && m.cfg.regex != nil { + if !m.cfg.regex.MatchString(e.Line) { + // Not a match to the regex, don't drop + if Debug { + level.Debug(m.logger).Log("msg", "line will not be dropped, the provided regular expression did not match the log line") + } + return false + } + if Debug { + level.Debug(m.logger).Log("msg", "line met drop criteria, the provided regular expression matched the log line") + } + } + + if m.cfg.Source != nil && m.cfg.regex != nil { + var extractedData []string + for _, src := range *m.cfg.source { + if e, ok := e.Extracted[src]; ok { + s, err := getString(e) if err != nil { if Debug { - level.Debug(m.logger).Log("msg", "Failed to convert extracted map value to string, cannot test regex line will not be dropped.", "err", err, "type", reflect.TypeOf(v)) - } - return false - } - match := m.cfg.regex.FindStringSubmatch(s) - if match == nil { - // Not a match to the regex, don't drop - if Debug { - level.Debug(m.logger).Log("msg", fmt.Sprintf("line will not be dropped, the provided regular expression did not match the value found in the extracted map for source key: %v", *m.cfg.Source)) + level.Debug(m.logger).Log("msg", "Failed to convert extracted map value to string, cannot test regex line will not be dropped.", "err", err, "type", reflect.TypeOf(e)) } return false } - // regex match, will be dropped - if Debug { - 
level.Debug(m.logger).Log("msg", "line met drop criteria, regex matched the value in the extracted map source key") - } - - } else { - // Not found in extact map, don't drop - if Debug { - level.Debug(m.logger).Log("msg", "line will not be dropped, the provided source was not found in the extracted map") - } - return false - } - } else { - match := m.cfg.regex.FindStringSubmatch(e.Line) - if match == nil { - // Not a match to the regex, don't drop - if Debug { - level.Debug(m.logger).Log("msg", "line will not be dropped, the provided regular expression did not match the log line") - } - return false + extractedData = append(extractedData, s) } + } + if !m.cfg.regex.MatchString(strings.Join(extractedData, *m.cfg.Separator)) { + // Not a match to the regex, don't drop if Debug { - level.Debug(m.logger).Log("msg", "line met drop criteria, the provided regular expression matched the log line") + level.Debug(m.logger).Log("msg", "line will not be dropped, the provided regular expression did not match the log line") } + return false + } + if Debug { + level.Debug(m.logger).Log("msg", "line met drop criteria, the provided regular expression matched the log line") } } diff --git a/clients/pkg/logentry/stages/drop_test.go b/clients/pkg/logentry/stages/drop_test.go index daafc47097ea..c27af36b98ae 100644 --- a/clients/pkg/logentry/stages/drop_test.go +++ b/clients/pkg/logentry/stages/drop_test.go @@ -23,15 +23,17 @@ pipeline_stages: app: msg: - drop: - source: src + sources: + - src expression: ".*test.*" older_than: 24h longer_than: 8kb - drop: expression: ".*app1.*" - drop: - source: app - value: loki + sources: + - app + expression: loki - drop: longer_than: 10000 ` @@ -105,7 +107,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Matched Source", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", }, labels: model.LabelSet{}, extracted: map[string]interface{}{ @@ -116,7 +118,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Did not match 
Source", config: &DropConfig{ - Source: ptrFromString("key1"), + Source: "key1", }, labels: model.LabelSet{}, extracted: map[string]interface{}{ @@ -127,7 +129,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Matched Source and Value", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", Value: ptrFromString("val1"), }, labels: model.LabelSet{}, @@ -139,7 +141,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Did not match Source and Value", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", Value: ptrFromString("val1"), }, labels: model.LabelSet{}, @@ -151,7 +153,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Matched Source(int) and Value(string)", config: &DropConfig{ - Source: ptrFromString("level"), + Source: "level", Value: ptrFromString("50"), }, labels: model.LabelSet{}, @@ -163,7 +165,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Matched Source(string) and Value(string)", config: &DropConfig{ - Source: ptrFromString("level"), + Source: "level", Value: ptrFromString("50"), }, labels: model.LabelSet{}, @@ -175,7 +177,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Did not match Source(int) and Value(string)", config: &DropConfig{ - Source: ptrFromString("level"), + Source: "level", Value: ptrFromString("50"), }, labels: model.LabelSet{}, @@ -187,7 +189,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Did not match Source(string) and Value(string)", config: &DropConfig{ - Source: ptrFromString("level"), + Source: "level", Value: ptrFromString("50"), }, labels: model.LabelSet{}, @@ -197,21 +199,87 @@ func Test_dropStage_Process(t *testing.T) { shouldDrop: false, }, { - name: "Regex Matched Source and Value", + name: "Matched Source and Value with multiple sources", config: &DropConfig{ - Source: ptrFromString("key"), - Expression: ptrFromString(".*val.*"), + Source: []string{"key1", "key2"}, + Value: ptrFromString(`val1;val200.*`), }, labels: model.LabelSet{}, 
extracted: map[string]interface{}{ - "key": "val1", + "key1": "val1", + "key2": "val200.*", + }, + shouldDrop: true, + }, + { + name: "Matched Source and Value with multiple sources and custom separator", + config: &DropConfig{ + Source: []string{"key1", "key2"}, + Separator: ptrFromString("|"), + Value: ptrFromString(`val1|val200[a]`), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key1": "val1", + "key2": "val200[a]", + }, + shouldDrop: true, + }, + { + name: "Regex Matched Source(int) and Expression", + config: &DropConfig{ + Source: "key", + Expression: ptrFromString("50"), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key": 50, + }, + shouldDrop: true, + }, + { + name: "Regex Matched Source(string) and Expression", + config: &DropConfig{ + Source: "key", + Expression: ptrFromString("50"), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key": "50", + }, + shouldDrop: true, + }, + { + name: "Regex Matched Source and Expression with multiple sources", + config: &DropConfig{ + Source: []string{"key1", "key2"}, + Expression: ptrFromString(`val\d{1};val\d{3}$`), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key1": "val1", + "key2": "val200", + }, + shouldDrop: true, + }, + { + name: "Regex Matched Source and Expression with multiple sources and custom separator", + config: &DropConfig{ + Source: []string{"key1", "key2"}, + Separator: ptrFromString("#"), + Expression: ptrFromString(`val\d{1}#val\d{3}$`), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key1": "val1", + "key2": "val200", }, shouldDrop: true, }, { - name: "Regex Did not match Source and Value", + name: "Regex Did not match Source and Expression", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, @@ -220,10 +288,37 @@ func Test_dropStage_Process(t *testing.T) { }, shouldDrop: false, }, + { + 
name: "Regex Did not match Source and Expression with multiple sources", + config: &DropConfig{ + Source: []string{"key1", "key2"}, + Expression: ptrFromString(`match\d+;match\d+`), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key1": "match1", + "key2": "notmatch2", + }, + shouldDrop: false, + }, + { + name: "Regex Did not match Source and Expression with multiple sources and custom separator", + config: &DropConfig{ + Source: []string{"key1", "key2"}, + Separator: ptrFromString("#"), + Expression: ptrFromString(`match\d;match\d`), + }, + labels: model.LabelSet{}, + extracted: map[string]interface{}{ + "key1": "match1", + "key2": "match2", + }, + shouldDrop: false, + }, { name: "Regex No Matching Source", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, @@ -255,7 +350,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Match Source and Length Both Match", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, @@ -268,7 +363,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Match Source and Length Only First Matches", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, @@ -281,7 +376,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Match Source and Length Only Second Matches", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, @@ -294,7 +389,7 @@ func Test_dropStage_Process(t *testing.T) { { name: "Everything Must Match", config: &DropConfig{ - Source: ptrFromString("key"), + Source: "key", Expression: ptrFromString(".*val.*"), OlderThan: ptrFromString("1h"), LongerThan: ptrFromString("10b"), @@ -348,8 +443,6 @@ func TestDropPipeline(t *testing.T) { var ( dropInvalidDur = "10y" - dropVal = 
"msg" - dropRegex = ".*blah" dropInvalidRegex = "(?P 0 { + for k, v := range c.cfg.Headers { + if req.Header.Get(k) == "" { + req.Header.Add(k, v) + } else { + level.Warn(c.logger).Log("msg", "custom header key already exists, skipping", "key", k) + } + } + } + resp, err := c.client.Do(req) if err != nil { return -1, err diff --git a/clients/pkg/promtail/client/config.go b/clients/pkg/promtail/client/config.go index fc34c99d8de6..d38aa59b75a5 100644 --- a/clients/pkg/promtail/client/config.go +++ b/clients/pkg/promtail/client/config.go @@ -28,7 +28,8 @@ type Config struct { BatchWait time.Duration `yaml:"batchwait"` BatchSize int `yaml:"batchsize"` - Client config.HTTPClientConfig `yaml:",inline"` + Client config.HTTPClientConfig `yaml:",inline"` + Headers map[string]string `yaml:"headers,omitempty"` BackoffConfig backoff.Config `yaml:"backoff_config"` // The labels to add to any time series or alerts when communicating with loki diff --git a/clients/pkg/promtail/config/config_test.go b/clients/pkg/promtail/config/config_test.go index 5c6519aae221..04cd09f56dfc 100644 --- a/clients/pkg/promtail/config/config_test.go +++ b/clients/pkg/promtail/config/config_test.go @@ -39,12 +39,30 @@ limits_config: readline_burst: 200 ` +const headersTestFile = ` +clients: + - name: custom-headers + url: https://1:shh@example.com/loki/api/v1/push + headers: + name: value +` + func Test_Load(t *testing.T) { var dst Config err := yaml.Unmarshal([]byte(testFile), &dst) require.Nil(t, err) } +func TestHeadersConfigLoad(t *testing.T) { + var dst Config + err := yaml.Unmarshal([]byte(headersTestFile), &dst) + require.Nil(t, err) + + for _, clientConfig := range dst.ClientConfigs { + require.Equal(t, map[string]string{"name": "value"}, clientConfig.Headers) + } +} + func Test_RateLimitLoad(t *testing.T) { var dst Config err := yaml.Unmarshal([]byte(testFile), &dst) diff --git a/clients/pkg/promtail/positions/positions_test.go b/clients/pkg/promtail/positions/positions_test.go index 
9bbff9fa17aa..1dce97b08ec7 100644 --- a/clients/pkg/promtail/positions/positions_test.go +++ b/clients/pkg/promtail/positions/positions_test.go @@ -40,7 +40,7 @@ func TestReadPositionsOK(t *testing.T) { }() yaml := []byte(`positions: - /tmp/random.log: "17623" + /log/path/random.log: "17623" `) err := os.WriteFile(temp, yaml, 0644) if err != nil { @@ -52,7 +52,7 @@ func TestReadPositionsOK(t *testing.T) { }, log.NewNopLogger()) require.NoError(t, err) - require.Equal(t, "17623", pos["/tmp/random.log"]) + require.Equal(t, "17623", pos["/log/path/random.log"]) } func TestReadPositionsEmptyFile(t *testing.T) { @@ -145,7 +145,7 @@ func Test_ReadOnly(t *testing.T) { _ = os.Remove(temp) }() yaml := []byte(`positions: - /tmp/random.log: "17623" + /log/path/random.log: "17623" `) err := os.WriteFile(temp, yaml, 0644) if err != nil { @@ -162,7 +162,7 @@ func Test_ReadOnly(t *testing.T) { defer p.Stop() p.Put("/foo/bar/f", 12132132) p.PutString("/foo/f", "100") - pos, err := p.Get("/tmp/random.log") + pos, err := p.Get("/log/path/random.log") if err != nil { t.Fatal(err) } @@ -176,7 +176,7 @@ func Test_ReadOnly(t *testing.T) { require.NoError(t, err) require.Equal(t, map[string]string{ - "/tmp/random.log": "17623", + "/log/path/random.log": "17623", }, out) } diff --git a/clients/pkg/promtail/promtail_test.go b/clients/pkg/promtail/promtail_test.go index 6f6501b59d21..04a61cf25f54 100644 --- a/clients/pkg/promtail/promtail_test.go +++ b/clients/pkg/promtail/promtail_test.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "math" - "math/rand" "net" "net/http" "net/url" @@ -39,6 +38,7 @@ import ( "github.com/grafana/loki/clients/pkg/promtail/server" pserver "github.com/grafana/loki/clients/pkg/promtail/server" file2 "github.com/grafana/loki/clients/pkg/promtail/targets/file" + "github.com/grafana/loki/clients/pkg/promtail/targets/testutils" "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/util" @@ -54,8 +54,8 @@ func TestPromtail(t *testing.T) { logger = 
level.NewFilter(logger, level.AllowInfo()) util_log.Logger = logger - initRandom() - dirName := "/tmp/promtail_test_" + randName() + testutils.InitRandom() + dirName := filepath.Join(os.TempDir(), "/promtail_test_"+testutils.RandName()) positionsFileName := dirName + "/positions.yml" err := os.MkdirAll(dirName, 0o750) @@ -641,22 +641,8 @@ func buildTestConfig(t *testing.T, positionsFileName string, logDirName string) return cfg } -func initRandom() { - rand.Seed(time.Now().UnixNano()) -} - -var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func randName() string { - b := make([]rune, 10) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} - func Test_DryRun(t *testing.T) { - f, err := os.CreateTemp("/tmp", "Test_DryRun") + f, err := os.CreateTemp("", "Test_DryRun") require.NoError(t, err) defer os.Remove(f.Name()) @@ -701,7 +687,7 @@ func Test_DryRun(t *testing.T) { } func Test_Reload(t *testing.T) { - f, err := os.CreateTemp("/tmp", "Test_Reload") + f, err := os.CreateTemp("", "Test_Reload") require.NoError(t, err) defer os.Remove(f.Name()) @@ -770,7 +756,7 @@ func Test_Reload(t *testing.T) { } func Test_ReloadFail_NotPanic(t *testing.T) { - f, err := os.CreateTemp("/tmp", "Test_Reload") + f, err := os.CreateTemp("", "Test_Reload") require.NoError(t, err) defer os.Remove(f.Name()) diff --git a/clients/pkg/promtail/targets/gcplog/pull_target.go b/clients/pkg/promtail/targets/gcplog/pull_target.go index b1f1f15b50b0..fba08b804136 100644 --- a/clients/pkg/promtail/targets/gcplog/pull_target.go +++ b/clients/pkg/promtail/targets/gcplog/pull_target.go @@ -1,17 +1,18 @@ package gcplog import ( - "cloud.google.com/go/pubsub" "context" + "io" + "sync" + "time" + + "cloud.google.com/go/pubsub" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/backoff" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" "google.golang.org/api/option" - "io" - 
"sync" - "time" "github.com/grafana/loki/clients/pkg/promtail/api" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" diff --git a/clients/pkg/promtail/targets/gcplog/pull_target_test.go b/clients/pkg/promtail/targets/gcplog/pull_target_test.go index bd6b72c8c902..7c1746f4904f 100644 --- a/clients/pkg/promtail/targets/gcplog/pull_target_test.go +++ b/clients/pkg/promtail/targets/gcplog/pull_target_test.go @@ -2,12 +2,13 @@ package gcplog import ( "context" - "github.com/grafana/dskit/backoff" - "github.com/pkg/errors" "io" "testing" "time" + "github.com/grafana/dskit/backoff" + "github.com/pkg/errors" + "cloud.google.com/go/pubsub" "github.com/go-kit/log" "github.com/grafana/loki/clients/pkg/promtail/client/fake" diff --git a/clients/pkg/promtail/targets/journal/journaltarget_test.go b/clients/pkg/promtail/targets/journal/journaltarget_test.go index 72220f5ca421..03d0dadae7a9 100644 --- a/clients/pkg/promtail/targets/journal/journaltarget_test.go +++ b/clients/pkg/promtail/targets/journal/journaltarget_test.go @@ -6,6 +6,7 @@ package journal import ( "io" "os" + "path/filepath" "strings" "testing" "time" @@ -70,7 +71,7 @@ func TestJournalTarget(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -132,7 +133,7 @@ func TestJournalTargetParsingErrors(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -200,7 +201,7 @@ func TestJournalTarget_JSON(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + 
dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -260,7 +261,7 @@ func TestJournalTarget_Since(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -294,7 +295,7 @@ func TestJournalTarget_Cursor_TooOld(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -334,7 +335,7 @@ func TestJournalTarget_Cursor_NotTooOld(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer @@ -391,7 +392,7 @@ func TestJournalTarget_Matches(t *testing.T) { logger := log.NewLogfmtLogger(w) testutils.InitRandom() - dirName := "/tmp/" + testutils.RandName() + dirName := filepath.Join(os.TempDir(), testutils.RandName()) positionsFileName := dirName + "/positions.yml" // Set the sync period to a really long value, to guarantee the sync timer diff --git a/cmd/loki-canary/main.go b/cmd/loki-canary/main.go index 86034f1565ad..e3b06766d5eb 100644 --- a/cmd/loki-canary/main.go +++ b/cmd/loki-canary/main.go @@ -99,7 +99,7 @@ func main() { } if *addr == "" { - _, _ = fmt.Fprintf(os.Stderr, "Must specify a Loki address with -addr or set the environemnt variable LOKI_ADDRESS\n") + _, _ = fmt.Fprintf(os.Stderr, "Must specify a Loki 
address with -addr or set the environment variable LOKI_ADDRESS\n") os.Exit(1) } diff --git a/docs/Makefile b/docs/Makefile index 45c40d0bd72a..e3a60c369815 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,5 +1,5 @@ -PODMAN = $(shell if command -v podman &>/dev/null; then echo podman; else echo docker; fi) -IMAGE = grafana/docs-base:latest +PODMAN := $(shell if command -v podman >/dev/null 2>&1; then echo podman; else echo docker; fi) +IMAGE := grafana/docs-base:latest BUILD_IN_CONTAINER ?= true .PHONY: pull diff --git a/docs/sources/clients/promtail/configuration.md b/docs/sources/clients/promtail/configuration.md index 0178021938af..13cf60e6dbe0 100644 --- a/docs/sources/clients/promtail/configuration.md +++ b/docs/sources/clients/promtail/configuration.md @@ -187,6 +187,12 @@ Loki: # Example: http://example.com:3100/loki/api/v1/push url: +# Custom HTTP headers to be sent along with each push request. +# Be aware that headers that are set by Promtail itself (e.g. X-Scope-OrgID) can't be overwritten. +headers: + # Example: CF-Access-Client-Id: xxx + [ : ... ] + # The tenant ID used by default to push logs to Loki. If omitted or empty # it assumes Loki is running in single-tenant mode and no X-Scope-OrgID header # is sent. diff --git a/docs/sources/clients/promtail/stages/drop.md b/docs/sources/clients/promtail/stages/drop.md index 17281562103b..fb3aa84d5f51 100644 --- a/docs/sources/clients/promtail/stages/drop.md +++ b/docs/sources/clients/promtail/stages/drop.md @@ -4,28 +4,33 @@ description: drop stage --- # drop -The `drop` stage is a filtering stage that lets you drop logs based on several options. +The `drop` stage is a filtering stage that lets you drop logs based on several options. It's important to note that if you provide multiple options they will be treated like an AND clause, where each option has to be true to drop the log. If you wish to drop with an OR clause, then specify multiple drop stages. -There are examples below to help explain. 
+There are examples below to help explain. ## Drop stage schema ```yaml drop: - # Name from extracted data to parse. If empty, uses the log message. - [source: ] - - # RE2 regular expression, if source is provided the regex will attempt to match the source - # If no source is provided, then the regex attempts to match the log line - # If the provided regex matches the log line or a provided source, the line will be dropped. + # Single name or names list of extracted data. If empty, uses the log message. + [source: [] | ] + + # Separator placed between concatenated extracted data names. The default separator is a semicolon. + [separator: | default = ";"] + + # RE2 regular expression. If `source` is provided and it's a list, the regex will attempt to match + # the concatenated sources. If no source is provided, then the regex attempts + # to match the log line. + # If the provided regex matches the log line or the source, the line will be dropped. [expression: ] - # value can only be specified when source is specified. It is an error to specify value and regex. + # value can only be specified when source is specified. If `source` is provided and it's a list, + # the value will attempt to match the concatenated sources. It is an error to specify value and expression. # If the value provided is an exact match for the `source` the line will be dropped. [value: ] @@ -38,9 +43,9 @@ drop: # Or can be expressed with a suffix such as 8kb [longer_than: |] - # Every time a log line is dropped the metric `logentry_dropped_lines_total` - # will be incremented. By default the reason label will be `drop_stage` - # however you can optionally specify a custom value to be used in the `reason` + # Every time a log line is dropped the metric `logentry_dropped_lines_total` + # will be incremented. By default the reason label will be `drop_stage` + # however you can optionally specify a custom value to be used in the `reason` # label of that metric here. 
[drop_counter_reason: | default = "drop_stage"] ``` @@ -53,18 +58,26 @@ The following are examples showing the use of the `drop` stage. Simple `drop` stage configurations only specify one of the options, or two options when using the `source` option. +Given the pipeline: + +```yaml +- drop: + source: ["level","msg"] +``` + +Drops any log line that has an extracted data field of at least `level` or `msg`. + #### Regex match a line -Given the pipeline: +This example pipeline drops any log line with the substring "debug" in it: ```yaml - drop: expression: ".*debug.*" ``` -Would drop any log line with the word `debug` in it. -#### Regex match a source +#### Regex match concatenated sources Given the pipeline: @@ -74,11 +87,12 @@ Given the pipeline: level: msg: - drop: - source: "level" - expression: "(error|ERROR)" + source: ["level","msg"] + separator: "#" + expression: "(error|ERROR)#.*\/loki\/api\/push.*" ``` -Would drop both of these log lines: +Drops both of these log lines: ``` {"time":"2019-01-01T01:00:00.000000001Z", "level": "error", "msg":"11.11.11.11 - "POST /loki/api/push/ HTTP/1.1" 200 932 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7 GTB6"} @@ -187,8 +201,8 @@ Given the pipeline: - drop: longer_than: 8kb - drop: - source: msg - regex: ".*trace.*" + source: "msg" + expression: ".*trace.*" ``` Would drop all logs older than 24h OR longer than 8kb bytes OR have a json `msg` field containing the word _trace_ diff --git a/docs/sources/configuration/_index.md b/docs/sources/configuration/_index.md index 0dbf779396b1..476befbdd9ec 100644 --- a/docs/sources/configuration/_index.md +++ b/docs/sources/configuration/_index.md @@ -199,6 +199,11 @@ Pass the `-config.expand-env` flag at the command line to enable this way of set # configuration is given in other sections, the related configuration within # this section will be ignored. [common: ] + +# How long to wait between SIGTERM and shutdown. 
After receiving SIGTERM, Loki +# will report 503 Service Unavailable status via /ready endpoint. +# CLI flag: -shutdown-delay +[shutdown_delay: | default = 0s] ``` ### server diff --git a/docs/sources/configuration/examples.md b/docs/sources/configuration/examples.md index 4d51158a0520..6410a870c8e5 100644 --- a/docs/sources/configuration/examples.md +++ b/docs/sources/configuration/examples.md @@ -4,49 +4,58 @@ description: Loki Configuration Examples --- # Examples -## almost-zero-dependency.yaml +## 1-Local-Configuration-Example.yaml ```yaml -# This is a configuration to deploy Loki depending only on a storage solution -# for example, an S3-compatible API like MinIO. -# The ring configuration is based on the gossip memberlist and the index is shipped to storage -# via Single Store (boltdb-shipper) - auth_enabled: false server: http_listen_port: 3100 -distributor: +common: ring: + instance_addr: 127.0.0.1 kvstore: - store: memberlist + store: inmemory + replication_factor: 1 + path_prefix: /tmp/loki -ingester: - lifecycler: - ring: - kvstore: - store: memberlist - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 5m - chunk_retain_period: 30s +schema_config: + configs: + - from: 2020-05-15 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h +``` -memberlist: - abort_if_cluster_join_fails: false - # Expose this port on all distributor, ingester - # and querier replicas. - bind_port: 7946 +## 2-S3-Cluster-Example.yaml - # You can use a headless k8s service for all distributor, - # ingester and querier components. - join_members: - - loki-gossip-ring.loki.svc.cluster.local:7946 +```yaml +# This is a complete configuration to deploy Loki backed by an S3-compatible API +# like MinIO for storage. Loki components will use memberlist ring to shard and +# the index will be shipped to storage via boltdb-shipper.
+ +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + ring: + instance_addr: 127.0.0.1 + kvstore: + store: memberlist + replication_factor: 1 + path_prefix: /loki # Update this accordingly, data will be stored here. - max_join_backoff: 1m - max_join_retries: 10 - min_join_backoff: 1s +memberlist: + join_members: + # You can use a headless k8s service for all distributor, ingester and querier components. + - loki-gossip-ring.loki.svc.cluster.local:7946 # :7946 is the default memberlist port. schema_config: configs: @@ -63,24 +72,18 @@ storage_config: active_index_directory: /loki/index cache_location: /loki/index_cache shared_store: s3 - aws: s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name s3forcepathstyle: true -limits_config: - enforce_metric_name: false - reject_old_samples: true - reject_old_samples_max_age: 168h - compactor: - working_directory: /data/compactor + working_directory: /loki/compactor shared_store: s3 compaction_interval: 5m ``` -## aws-basic-config-no-creds.yaml +## 3-S3-Without-Credentials-Snippet.yaml ```yaml # If you don't wish to hard-code S3 credentials you can also configure an EC2 @@ -103,29 +106,7 @@ storage_config: ``` -## aws-basic-config.yaml - -```yaml -# This partial configuration uses S3 for chunk storage and uses DynamoDB for index storage - -schema_config: - configs: - - from: 2020-05-15 - store: aws - object_store: s3 - schema: v11 - index: - prefix: loki_ -storage_config: - aws: - s3: s3://access_key:secret_access_key@region/bucket_name - dynamodb: - dynamodb_url: dynamodb://access_key:secret_access_key@region - -``` - - -## bos-config.yaml +## 4-BOS-Example.yaml ```yaml schema_config: @@ -156,83 +137,60 @@ compactor: ``` -## cassandra-index.yaml +## 5-S3-And-DynamoDB-Snippet.yaml ```yaml -# This is a partial config that uses the local filesystem for chunk storage and Cassandra for index storage +# This partial configuration uses S3 for chunk storage and uses DynamoDB for index storage 
schema_config: configs: - from: 2020-05-15 - store: cassandra - object_store: filesystem + store: aws + object_store: s3 schema: v11 index: - prefix: cassandra_table - period: 168h - + prefix: loki_ storage_config: - cassandra: - username: cassandra - password: cassandra - addresses: 127.0.0.1 - auth: true - keyspace: lokiindex - - filesystem: - directory: /tmp/loki/chunks - + aws: + s3: s3://access_key:secret_access_key@region/bucket_name + dynamodb: + dynamodb_url: dynamodb://access_key:secret_access_key@region + ``` -## complete-local-config.yaml +## 6-Cassandra-Snippet.yaml ```yaml -auth_enabled: false - -server: - http_listen_port: 3100 - -ingester: - lifecycler: - address: 127.0.0.1 - ring: - kvstore: - store: inmemory - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 5m - chunk_retain_period: 30s +# This is a partial config that uses the local filesystem for chunk storage and Cassandra for index storage schema_config: configs: - from: 2020-05-15 - store: boltdb + store: cassandra object_store: filesystem schema: v11 index: - prefix: index_ + prefix: cassandra_table period: 168h storage_config: - boltdb: - directory: /tmp/loki/index + cassandra: + username: cassandra + password: cassandra + addresses: 127.0.0.1 + auth: true + keyspace: lokiindex filesystem: directory: /tmp/loki/chunks - -limits_config: - enforce_metric_name: false - reject_old_samples: true - reject_old_samples_max_age: 168h + ``` -## example-schema-config.yaml +## 7-Schema-Migration-Snippet.yaml ```yaml -# Additional example schema configuration for Cassandra - schema_config: configs: # Starting from 2018-04-15 Loki should store indexes on Cassandra @@ -258,7 +216,7 @@ schema_config: ``` -## google-cloud-storage-config.yaml +## 8-GCS-Snippet.yaml ```yaml # This partial configuration uses GCS for chunk storage and uses BigTable for index storage @@ -283,31 +241,7 @@ storage_config: ``` -## s3-compatible-apis.yaml - -```yaml -# S3-compatible APIs such as Ceph Object Storage with 
an S3-compatible API, can be used. -# If the API supports path-style URLs rather than virtual hosted bucket addressing, -# configure the URL in `storage_config` with the custom endpoint - -schema_config: - configs: - - from: 2020-05-15 - store: aws - object_store: s3 - schema: v11 - index: - prefix: loki_ -storage_config: - aws: - s3: s3://access_key:secret_access_key@region/bucket_name - dynamodb: - dynamodb_url: dynamodb://access_key:secret_access_key@region - -``` - - -## s3-expanded-config.yaml +## 9-Expanded-S3-Snippet.yaml ```yaml # S3 configuration supports an expanded configuration. diff --git a/docs/sources/configuration/examples/1-Local-Configuration-Example.yaml b/docs/sources/configuration/examples/1-Local-Configuration-Example.yaml new file mode 100644 index 000000000000..6385f22b2bd6 --- /dev/null +++ b/docs/sources/configuration/examples/1-Local-Configuration-Example.yaml @@ -0,0 +1,22 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + ring: + instance_addr: 127.0.0.1 + kvstore: + store: inmemory + replication_factor: 1 + path_prefix: /tmp/loki + +schema_config: + configs: + - from: 2020-05-15 + store: boltdb-shipper + object_store: filesystem + schema: v11 + index: + prefix: index_ + period: 24h \ No newline at end of file diff --git a/docs/sources/configuration/examples/2-S3-Cluster-Example.yaml b/docs/sources/configuration/examples/2-S3-Cluster-Example.yaml new file mode 100644 index 000000000000..5f76f7988190 --- /dev/null +++ b/docs/sources/configuration/examples/2-S3-Cluster-Example.yaml @@ -0,0 +1,45 @@ +# This is a complete configuration to deploy Loki backed by an S3-compatible API +# like MinIO for storage. Loki components will use memberlist ring to shard and +# the index will be shipped to storage via boltdb-shipper.
+ +auth_enabled: false + +server: + http_listen_port: 3100 + +common: + ring: + instance_addr: 127.0.0.1 + kvstore: + store: memberlist + replication_factor: 1 + path_prefix: /loki # Update this accordingly, data will be stored here. + +memberlist: + join_members: + # You can use a headless k8s service for all distributor, ingester and querier components. + - loki-gossip-ring.loki.svc.cluster.local:7946 # :7946 is the default memberlist port. + +schema_config: + configs: + - from: 2020-05-15 + store: boltdb-shipper + object_store: s3 + schema: v11 + index: + prefix: index_ + period: 24h + +storage_config: + boltdb_shipper: + active_index_directory: /loki/index + cache_location: /loki/index_cache + shared_store: s3 + aws: + s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name + s3forcepathstyle: true + +compactor: + working_directory: /loki/compactor + shared_store: s3 + compaction_interval: 5m \ No newline at end of file diff --git a/docs/sources/configuration/examples/aws-basic-config-no-creds.yaml b/docs/sources/configuration/examples/3-S3-Without-Credentials-Snippet.yaml similarity index 100% rename from docs/sources/configuration/examples/aws-basic-config-no-creds.yaml rename to docs/sources/configuration/examples/3-S3-Without-Credentials-Snippet.yaml diff --git a/docs/sources/configuration/examples/bos-config.yaml b/docs/sources/configuration/examples/4-BOS-Example.yaml similarity index 100% rename from docs/sources/configuration/examples/bos-config.yaml rename to docs/sources/configuration/examples/4-BOS-Example.yaml diff --git a/docs/sources/configuration/examples/aws-basic-config.yaml b/docs/sources/configuration/examples/5-S3-And-DynamoDB-Snippet.yaml similarity index 100% rename from docs/sources/configuration/examples/aws-basic-config.yaml rename to docs/sources/configuration/examples/5-S3-And-DynamoDB-Snippet.yaml diff --git a/docs/sources/configuration/examples/cassandra-index.yaml 
b/docs/sources/configuration/examples/6-Cassandra-Snippet.yaml similarity index 100% rename from docs/sources/configuration/examples/cassandra-index.yaml rename to docs/sources/configuration/examples/6-Cassandra-Snippet.yaml diff --git a/docs/sources/configuration/examples/example-schema-config.yaml b/docs/sources/configuration/examples/7-Schema-Migration-Snippet.yaml similarity index 91% rename from docs/sources/configuration/examples/example-schema-config.yaml rename to docs/sources/configuration/examples/7-Schema-Migration-Snippet.yaml index 618f8dafe442..0d72566c7e3a 100644 --- a/docs/sources/configuration/examples/example-schema-config.yaml +++ b/docs/sources/configuration/examples/7-Schema-Migration-Snippet.yaml @@ -1,5 +1,3 @@ -# Additional example schema configuration for Cassandra - schema_config: configs: # Starting from 2018-04-15 Loki should store indexes on Cassandra diff --git a/docs/sources/configuration/examples/google-cloud-storage-config.yaml b/docs/sources/configuration/examples/8-GCS-Snippet.yaml similarity index 100% rename from docs/sources/configuration/examples/google-cloud-storage-config.yaml rename to docs/sources/configuration/examples/8-GCS-Snippet.yaml diff --git a/docs/sources/configuration/examples/s3-expanded-config.yaml b/docs/sources/configuration/examples/9-Expanded-S3-Snippet.yaml similarity index 100% rename from docs/sources/configuration/examples/s3-expanded-config.yaml rename to docs/sources/configuration/examples/9-Expanded-S3-Snippet.yaml diff --git a/docs/sources/configuration/examples/almost-zero-dependency.yaml b/docs/sources/configuration/examples/almost-zero-dependency.yaml deleted file mode 100644 index c82a3abf8929..000000000000 --- a/docs/sources/configuration/examples/almost-zero-dependency.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# This is a configuration to deploy Loki depending only on a storage solution -# for example, an S3-compatible API like MinIO. 
-# The ring configuration is based on the gossip memberlist and the index is shipped to storage -# via Single Store (boltdb-shipper) - -auth_enabled: false - -server: - http_listen_port: 3100 - -distributor: - ring: - kvstore: - store: memberlist - -ingester: - lifecycler: - ring: - kvstore: - store: memberlist - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 5m - chunk_retain_period: 30s - -memberlist: - abort_if_cluster_join_fails: false - - # Expose this port on all distributor, ingester - # and querier replicas. - bind_port: 7946 - - # You can use a headless k8s service for all distributor, - # ingester and querier components. - join_members: - - loki-gossip-ring.loki.svc.cluster.local:7946 - - max_join_backoff: 1m - max_join_retries: 10 - min_join_backoff: 1s - -schema_config: - configs: - - from: 2020-05-15 - store: boltdb-shipper - object_store: s3 - schema: v11 - index: - prefix: index_ - period: 24h - -storage_config: - boltdb_shipper: - active_index_directory: /loki/index - cache_location: /loki/index_cache - shared_store: s3 - - aws: - s3: s3://access_key:secret_access_key@custom_endpoint/bucket_name - s3forcepathstyle: true - -limits_config: - enforce_metric_name: false - reject_old_samples: true - reject_old_samples_max_age: 168h - -compactor: - working_directory: /data/compactor - shared_store: s3 - compaction_interval: 5m diff --git a/docs/sources/configuration/examples/complete-local-config.yaml b/docs/sources/configuration/examples/complete-local-config.yaml deleted file mode 100644 index 16942b969466..000000000000 --- a/docs/sources/configuration/examples/complete-local-config.yaml +++ /dev/null @@ -1,37 +0,0 @@ -auth_enabled: false - -server: - http_listen_port: 3100 - -ingester: - lifecycler: - address: 127.0.0.1 - ring: - kvstore: - store: inmemory - replication_factor: 1 - final_sleep: 0s - chunk_idle_period: 5m - chunk_retain_period: 30s - -schema_config: - configs: - - from: 2020-05-15 - store: boltdb - object_store: filesystem 
- schema: v11 - index: - prefix: index_ - period: 168h - -storage_config: - boltdb: - directory: /tmp/loki/index - - filesystem: - directory: /tmp/loki/chunks - -limits_config: - enforce_metric_name: false - reject_old_samples: true - reject_old_samples_max_age: 168h diff --git a/docs/sources/configuration/examples/s3-compatible-apis.yaml b/docs/sources/configuration/examples/s3-compatible-apis.yaml deleted file mode 100644 index 6631cbb119a0..000000000000 --- a/docs/sources/configuration/examples/s3-compatible-apis.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# S3-compatible APIs such as Ceph Object Storage with an S3-compatible API, can be used. -# If the API supports path-style URLs rather than virtual hosted bucket addressing, -# configure the URL in `storage_config` with the custom endpoint - -schema_config: - configs: - - from: 2020-05-15 - store: aws - object_store: s3 - schema: v11 - index: - prefix: loki_ -storage_config: - aws: - s3: s3://access_key:secret_access_key@region/bucket_name - dynamodb: - dynamodb_url: dynamodb://access_key:secret_access_key@region - \ No newline at end of file diff --git a/docs/sources/installation/docker.md b/docs/sources/installation/docker.md index 0e1cd30c431e..449d226e6733 100644 --- a/docs/sources/installation/docker.md +++ b/docs/sources/installation/docker.md @@ -22,9 +22,9 @@ The configuration acquired with these installation instructions run Loki as a si Copy and paste the commands below into your command line. 
```bash -wget https://raw.githubusercontent.com/grafana/loki/v2.7.2/cmd/loki/loki-local-config.yaml -O loki-config.yaml +wget https://raw.githubusercontent.com/grafana/loki/v2.7.3/cmd/loki/loki-local-config.yaml -O loki-config.yaml docker run --name loki -d -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:2.6.1 -config.file=/mnt/config/loki-config.yaml -wget https://raw.githubusercontent.com/grafana/loki/v2.7.2/clients/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml +wget https://raw.githubusercontent.com/grafana/loki/v2.7.3/clients/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml docker run --name promtail -d -v $(pwd):/mnt/config -v /var/log:/var/log --link loki grafana/promtail:2.6.1 -config.file=/mnt/config/promtail-config.yaml ``` @@ -40,9 +40,9 @@ Copy and paste the commands below into your terminal. Note that you will need to ```bash cd "" -wget https://raw.githubusercontent.com/grafana/loki/v2.7.2/cmd/loki/loki-local-config.yaml -O loki-config.yaml +wget https://raw.githubusercontent.com/grafana/loki/v2.7.3/cmd/loki/loki-local-config.yaml -O loki-config.yaml docker run --name loki -v :/mnt/config -p 3100:3100 grafana/loki:2.6.1 --config.file=/mnt/config/loki-config.yaml -wget https://raw.githubusercontent.com/grafana/loki/v2.7.2/clients/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml +wget https://raw.githubusercontent.com/grafana/loki/v2.7.3/clients/cmd/promtail/promtail-docker-config.yaml -O promtail-config.yaml docker run -v :/mnt/config -v /var/log:/var/log --link loki grafana/promtail:2.6.1 --config.file=/mnt/config/promtail-config.yaml ``` @@ -55,6 +55,6 @@ Navigate to http://localhost:3100/metrics to view the output. Run the following commands in your command line. They work for Windows or Linux systems. 
```bash -wget https://raw.githubusercontent.com/grafana/loki/v2.7.2/production/docker-compose.yaml -O docker-compose.yaml +wget https://raw.githubusercontent.com/grafana/loki/v2.7.3/production/docker-compose.yaml -O docker-compose.yaml docker-compose -f docker-compose.yaml up ``` diff --git a/docs/sources/installation/helm/install-monolithic/index.md b/docs/sources/installation/helm/install-monolithic/index.md index ebb3e7427f1c..235706b410f8 100644 --- a/docs/sources/installation/helm/install-monolithic/index.md +++ b/docs/sources/installation/helm/install-monolithic/index.md @@ -12,9 +12,8 @@ keywords: [] This Helm Chart installation runs the Grafana Loki *single binary* within a Kubernetes cluster. -If the storage type is set to `filesystem`, this chart configures Loki to run the `all` target in a [monolithic mode]({{}}), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. - -It is not possible to install the single binary with a different storage type. +If you set the `singleBinary.replicas` value to 1, this chart configures Loki to run the `all` target in a [monolithic mode]({{}}), designed to work with a filesystem storage. It will also configure meta-monitoring of metrics and logs. +If you set the `singleBinary.replicas` value to 2 or more, this chart configures Loki to run a *single binary* in a replicated, highly available mode. When running replicas of a single binary, you must configure object storage. **Before you begin: Software Requirements** @@ -35,9 +34,9 @@ It is not possible to install the single binary with a different storage type. helm repo update ``` -1. Configure the `filesystem` storage: +1. Create the configuration file `values.yaml`: - - Create the configuration file `values.yaml`: + - If running a single replica of Loki, configure the `filesystem` storage: ```yaml loki: @@ -45,6 +44,25 @@ It is not possible to install the single binary with a different storage type. 
replication_factor: 1 storage: type: 'filesystem' + singleBinary: + replicas: 1 + ``` + + - If running Loki with a replication factor greater than 1, set the desired number of replicas and provide object storage credentials: + + ```yaml + loki: + commonConfig: + replication_factor: 3 + storage: + type: 's3' + s3: + endpoint: foo.aws.com + bucketnames: loki-chunks + secret_access_key: supersecret + access_key_id: secret + singleBinary: + replicas: 3 ``` 1. Deploy the Loki cluster using one of these commands. @@ -58,5 +76,5 @@ It is not possible to install the single binary with a different storage type. - Deploy with the defined configuration in a custom Kubernetes cluster namespace: ```bash - helm install --values values.yaml loki --namespace=loki grafana/loki-simple-scalable + helm install --values values.yaml loki --namespace=loki grafana/loki ``` diff --git a/docs/sources/installation/helm/reference.md b/docs/sources/installation/helm/reference.md index 4a803d727cee..b99f7d4661e3 100644 --- a/docs/sources/installation/helm/reference.md +++ b/docs/sources/installation/helm/reference.md @@ -3167,7 +3167,7 @@ null int Number of replicas for the single binary
-1
+0
 
diff --git a/docs/sources/installation/sizing/index.md b/docs/sources/installation/sizing/index.md index c31eec734bc8..d618eb6d6b43 100644 --- a/docs/sources/installation/sizing/index.md +++ b/docs/sources/installation/sizing/index.md @@ -19,24 +19,25 @@ This tool helps to generate a Helm Charts `values.yaml` file based on specified [scalable]({{}}) deployment. The storage needs to be configured after generation.
- - - - +
+ + GB/day +
- - +
+ + days +
- @@ -49,12 +50,14 @@ This tool helps to generate a Helm Charts `values.yaml` file based on specified Write Replicas Nodes Cores + Memory {{ clusterSize.TotalReadReplicas }} {{ clusterSize.TotalWriteReplicas }} {{ clusterSize.TotalNodes}} {{ clusterSize.TotalCoresRequest}} + {{ clusterSize.TotalMemoryRequest}} GB
@@ -72,13 +75,13 @@ This tool helps to generate a Helm Charts `values.yaml` file based on specified Defines how long the ingested logs should be kept. - Defines the expected query performance. Basic enables 3mbps. Super should be chosen if more query throughput is required. + Defines the expected query performance. Basic is sized for a max query throughput of around 3GB/s. Super aims for 25% more throughput.
-.