Merge pull request grafana#259 from periklis/openshift-main-2024-02-05
Update from upstream repository incl. go1.20 compatibility patches
periklis authored Feb 5, 2024
2 parents cec7a08 + 4e625b0 commit 243c532
Showing 165 changed files with 8,090 additions and 3,852 deletions.
10 changes: 9 additions & 1 deletion .github/workflows/checks.yml
@@ -1,5 +1,8 @@
name: Checks
on: [push]
on:
pull_request:
branches:
- main
jobs:
checks:
runs-on: ubuntu-latest
@@ -10,6 +13,11 @@ jobs:
steps:
- uses: actions/checkout@v4
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- name: golangci-lint
uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5
with:
version: v1.55.1
only-new-issues: true
- run: make lint
- run: make check-doc
- run: make check-mod
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -53,6 +53,7 @@
* [11679](https://github.com/grafana/loki/pull/11679) **dannykopping** Cache: extending #11535 to align custom ingester query split with cache keys for correct caching of results.
* [11143](https://github.com/grafana/loki/pull/11143) **sandeepsukhani** otel: Add support for per tenant configuration for mapping otlp data to loki format
* [11499](https://github.com/grafana/loki/pull/11284) **jmichalek132** Config: Adds `frontend.log-query-request-headers` to enable logging of request headers in query logs.
* [11817](https://github.com/grafana/loki/pull/11817) **ashwanthgoli** Ruler: Add support for filtering results of `/prometheus/api/v1/rules` endpoint by rule_name, rule_group, file and type.

##### Fixes
* [11074](https://github.com/grafana/loki/pull/11074) **hainenber** Fix panic in lambda-promtail due to mishandling of empty DROP_LABELS env var.
65 changes: 36 additions & 29 deletions clients/pkg/promtail/targets/docker/target_test.go
@@ -5,7 +5,6 @@ import (
"net/http"
"net/http/httptest"
"os"
"sort"
"strings"
"testing"
"time"
@@ -17,6 +16,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/relabel"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/grafana/loki/clients/pkg/promtail/client/fake"
@@ -77,49 +77,56 @@ func Test_DockerTarget(t *testing.T) {
)
require.NoError(t, err)

require.Eventually(t, func() bool {
return len(entryHandler.Received()) >= 5
}, 5*time.Second, 100*time.Millisecond)

received := entryHandler.Received()
sort.Slice(received, func(i, j int) bool {
return received[i].Timestamp.Before(received[j].Timestamp)
})

expectedLines := []string{
"5.3.69.55 - - [09/Dec/2021:09:15:02 +0000] \"HEAD /brand/users/clicks-and-mortar/front-end HTTP/2.0\" 503 27087",
"101.54.183.185 - - [09/Dec/2021:09:15:03 +0000] \"POST /next-generation HTTP/1.0\" 416 11468",
"69.27.137.160 - runolfsdottir2670 [09/Dec/2021:09:15:03 +0000] \"HEAD /content/visionary/engineer/cultivate HTTP/1.1\" 302 2975",
"28.104.242.74 - - [09/Dec/2021:09:15:03 +0000] \"PATCH /value-added/cultivate/systems HTTP/2.0\" 405 11843",
"150.187.51.54 - satterfield1852 [09/Dec/2021:09:15:03 +0000] \"GET /incentivize/deliver/innovative/cross-platform HTTP/1.1\" 301 13032",
}
actualLines := make([]string, 0, 5)
for _, entry := range received[:5] {
actualLines = append(actualLines, entry.Line)
}
require.ElementsMatch(t, actualLines, expectedLines)

assert.EventuallyWithT(t, func(c *assert.CollectT) {
assertExpectedLog(c, entryHandler, expectedLines)
}, 5*time.Second, 100*time.Millisecond, "Expected log lines were not found within the time limit.")

target.Stop()
entryHandler.Clear()
// restart target to simulate container restart
target.startIfNotRunning()
entryHandler.Clear()
require.Eventually(t, func() bool {
return len(entryHandler.Received()) >= 5
}, 5*time.Second, 100*time.Millisecond)

receivedAfterRestart := entryHandler.Received()
sort.Slice(receivedAfterRestart, func(i, j int) bool {
return receivedAfterRestart[i].Timestamp.Before(receivedAfterRestart[j].Timestamp)
})
actualLinesAfterRestart := make([]string, 0, 5)
for _, entry := range receivedAfterRestart[:5] {
actualLinesAfterRestart = append(actualLinesAfterRestart, entry.Line)
}
expectedLinesAfterRestart := []string{
"243.115.12.215 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /morph/exploit/granular HTTP/1.0\" 500 26468",
"221.41.123.237 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /user-centric/whiteboard HTTP/2.0\" 205 22487",
"89.111.144.144 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /open-source/e-commerce HTTP/1.0\" 401 11092",
"62.180.191.187 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /cultivate/integrate/technologies HTTP/2.0\" 302 12979",
"156.249.2.192 - - [09/Dec/2023:09:16:57 +0000] \"POST /revolutionize/mesh/metrics HTTP/2.0\" 401 5297",
}
require.ElementsMatch(t, actualLinesAfterRestart, expectedLinesAfterRestart)
assert.EventuallyWithT(t, func(c *assert.CollectT) {
assertExpectedLog(c, entryHandler, expectedLinesAfterRestart)
}, 5*time.Second, 100*time.Millisecond, "Expected log lines after restart were not found within the time limit.")
}

// assertExpectedLog will verify that all expectedLines were received, in any order, without duplicates.
func assertExpectedLog(c *assert.CollectT, entryHandler *fake.Client, expectedLines []string) {
logLines := entryHandler.Received()
testLogLines := make(map[string]int)
for _, l := range logLines {
if containsString(expectedLines, l.Line) {
testLogLines[l.Line]++
}
}
// assert that all log lines were received
assert.Len(c, testLogLines, len(expectedLines))
// assert that there are no duplicated log lines
for _, v := range testLogLines {
assert.Equal(c, v, 1)
}
}

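// containsString reports whether str exactly matches one of the elements of slice.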
func containsString(slice []string, str string) bool {
for _, item := range slice {
if item == str {
return true
}
}
return false
}
39 changes: 33 additions & 6 deletions docs/sources/configure/_index.md
@@ -3143,14 +3143,22 @@ shard_streams:

# OTLP log ingestion configurations
otlp_config:
# Configuration for resource attributes to store them as index labels or
# Structured Metadata or drop them altogether
resource_attributes:
[ignore_defaults: <boolean>]
# Configure whether to ignore the default list of resource attributes to be
# stored as index labels and only use the given resource attributes config
[ignore_defaults: <boolean> | default = false]

[attributes: <list of AttributesConfigs>]
[attributes_config: <list of attributes_configs>]

[scope_attributes: <list of AttributesConfigs>]
# Configuration for scope attributes to store them as Structured Metadata or
# drop them altogether
[scope_attributes: <list of attributes_configs>]

[log_attributes: <list of AttributesConfigs>]
# Configuration for log attributes to store them as Structured Metadata or
# drop them altogether
[log_attributes: <list of attributes_configs>]
```
### frontend_worker
@@ -4577,7 +4585,7 @@ chunks:
[tags: <map of string to string>]
# How many shards will be created. Only used if schema is v10 or greater.
[row_shards: <int>]
[row_shards: <int> | default = 16]
```

### aws_storage_config
@@ -5292,6 +5300,24 @@ Named store from this example can be used by setting object_store to store-1 in
[cos: <map of string to cos_storage_config>]
```

### attributes_config

Define actions for matching OpenTelemetry (OTEL) attributes.

```yaml
# Configures action to take on matching attributes. It allows one of
# [structured_metadata, drop] for all attribute types. It additionally allows
# index_label action for resource attributes
[action: <string> | default = ""]
# List of attributes to configure how to store them or drop them altogether
[attributes: <list of strings>]
# Regex to choose attributes to configure how to store them or drop them
# altogether
[regex: <Regexp>]
```
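
To illustrate how `attributes_config` entries compose inside the `otlp_config` block shown earlier, here is a minimal sketch; the attribute names and the regex below are illustrative examples, not defaults:

```yaml
limits_config:
  otlp_config:
    resource_attributes:
      ignore_defaults: false
      attributes_config:
        # Store these resource attributes as index labels
        - action: index_label
          attributes:
            - service.name
            - k8s.pod.name
        # Drop any resource attribute matching the regex
        - action: drop
          regex: internal\..*
    log_attributes:
      # Keep thread_name queryable without indexing it
      - action: structured_metadata
        attributes:
          - thread_name
```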

## Runtime Configuration file

Loki has a concept of a "runtime config" file, which is simply a file that is reloaded while Loki is running. It is used by some Loki components to allow an operator to change some aspects of Loki configuration without restarting it. The file is specified using the `-runtime-config.file=<filename>` flag, and the reload period (which defaults to 10 seconds) can be changed with the `-runtime-config.reload-period=<duration>` flag. Previously this mechanism was only used by limits overrides, and the flags were called `-limits.per-user-override-config=<filename>` and `-limits.per-user-override-period=10s` respectively. These are still used if `-runtime-config.file=<filename>` is not specified.
@@ -5345,7 +5371,8 @@ place in the `limits_config` section:
configure a runtime configuration file:

```
runtime_config: overrides.yaml
runtime_config:
file: overrides.yaml
```

In the `overrides.yaml` file, add `unordered_writes` for each tenant
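
A minimal sketch of such an overrides file, using hypothetical tenant IDs:

```yaml
overrides:
  "tenant-a":
    unordered_writes: true
  "tenant-b":
    unordered_writes: false
```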
3 changes: 2 additions & 1 deletion docs/sources/configure/index.template
@@ -152,7 +152,8 @@ place in the `limits_config` section:
configure a runtime configuration file:

```
runtime_config: overrides.yaml
runtime_config:
file: overrides.yaml
```

In the `overrides.yaml` file, add `unordered_writes` for each tenant
38 changes: 24 additions & 14 deletions docs/sources/get-started/labels/structured-metadata.md
@@ -6,24 +6,22 @@ description: Describes how to enable structure metadata for logs and how to quer
# What is structured metadata

{{% admonition type="warning" %}}
Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki.
Structured metadata is an experimental feature and is subject to change in future releases of Grafana Loki. This feature is not yet available for Cloud Logs users.
{{% /admonition %}}

{{% admonition type="warning" %}}
Structured metadata was added to chunk format V4, which is used if the schema version is greater than or equal to `13`. (See [Schema Config]({{< relref "../../storage#schema-config" >}}) for more details about schema versions.)
{{% /admonition %}}

One of the powerful features of Loki is parsing logs at query time to extract metadata and build labels out of it.
However, the parsing of logs at query time comes with a cost which can be significantly high for, as an example,
large JSON blobs or a poorly written query using complex regex patterns.
Selecting proper, low-cardinality labels is critical to operating and querying Loki effectively. Some metadata, especially infrastructure-related metadata, can be difficult to embed in log lines, and is too high in cardinality to store effectively as indexed labels (and therefore reduces the performance of the index).

In addition, the data extracted from logs at query time is usually high cardinality, which can’t be stored
in the index as it would increase the cardinality too much, and therefore reduce the performance of the index.

Structured metadata is a way to attach metadata to logs without indexing them. Examples of useful metadata are
trace IDs, user IDs, and any other label that is often used in queries but has high cardinality and is expensive
Structured metadata is a way to attach metadata to logs without indexing them or including them in the log line content itself. Examples of useful metadata are
Kubernetes pod names, process IDs, or any other label that is often used in queries but has high cardinality and is expensive
to extract at query time.

Structured metadata can also be used to query commonly needed metadata from log lines without needing to apply a parser at query time. Large JSON blobs or a poorly written query using complex regex patterns, for example, come with a high performance cost. Examples of useful metadata include trace IDs or user IDs.


## Attaching structured metadata to log lines

You have the option to attach structured metadata to log lines in the push payload along with each log line and the timestamp.
@@ -34,25 +32,37 @@ See the [Promtail: Structured metadata stage]({{< relref "../../send-data/promta

With Loki version 1.2.0, support for structured metadata has been added to the Logstash output plugin. For more information, see [logstash]({{< relref "../../send-data/logstash/_index.md" >}}).
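
For instance, here is a minimal sketch of a Promtail pipeline that promotes a field to structured metadata, assuming JSON-formatted log lines that carry a `trace_id` field:

```yaml
pipeline_stages:
  # Extract trace_id from the JSON log line
  - json:
      expressions:
        trace_id: trace_id
  # Attach the extracted value as structured metadata rather than an indexed label
  - structured_metadata:
      trace_id:
```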

{{% admonition type="warning" %}}
There are defaults for how much structured metadata can be attached per log line.
```
# Maximum size accepted for structured metadata per log line.
# CLI flag: -limits.max-structured-metadata-size
[max_structured_metadata_size: <int> | default = 64KB]
# Maximum number of structured metadata entries per log line.
# CLI flag: -limits.max-structured-metadata-entries-count
[max_structured_metadata_entries_count: <int> | default = 128]
```
{{% /admonition %}}

## Querying structured metadata

Structured metadata is extracted automatically for each returned log line and added to the labels returned for the query.
You can use labels of structured metadata to filter log lines using a [label filter expression]({{< relref "../../query/log_queries#label-filter-expression" >}}).

For example, if you have a label `trace_id` attached to some of your log lines as structured metadata, you can filter log lines using:
For example, if you have a label `pod` attached to some of your log lines as structured metadata, you can filter log lines using:

```logql
{job="example"} | trace_id="0242ac120002"
{job="example"} | pod="myservice-abc1234-56789"`
```

Of course, you can filter by multiple labels of structured metadata at the same time:

```logql
{job="example"} | trace_id="0242ac120002" | user_id="superUser123"
{job="example"} | pod="myservice-abc1234-56789" | trace_id="0242ac120002"
```

Note that since structured metadata is extracted automatically to the results labels, some metric queries might return
an error like `maximum of series (50000) reached for a single query`. You can use the [Keep]({{< relref "../../query/log_queries#keep-labels-expression" >}}) and [Drop]({{< relref "../../query/log_queries#drop-labels-expression" >}}) stages to filter out labels that you don't need.
Note that since structured metadata is extracted automatically to the results labels, some metric queries might return an error like `maximum of series (50000) reached for a single query`. You can use the [Keep]({{< relref "../../query/log_queries#keep-labels-expression" >}}) and [Drop]({{< relref "../../query/log_queries#drop-labels-expression" >}}) stages to filter out labels that you don't need.
For example:

```logql
6 changes: 5 additions & 1 deletion docs/sources/reference/api.md
@@ -1178,11 +1178,15 @@ Deletes all the rule groups in a namespace (including the namespace itself). Thi
### List rules

```
GET /prometheus/api/v1/rules
GET /prometheus/api/v1/rules?type={alert|record}&file={}&rule_group={}&rule_name={}
```

Prometheus-compatible rules endpoint to list alerting and recording rules that are currently loaded.

The `type` parameter is optional. If set, only the specified type of rule is returned.

The `file`, `rule_group` and `rule_name` parameters are optional, and can accept multiple values. If set, the response content is filtered accordingly.
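
For example, the following request (the group name is illustrative) returns only the alerting rules defined in `example-group`:

```
GET /prometheus/api/v1/rules?type=alert&rule_group=example-group
```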

For more information, refer to the [Prometheus rules](https://prometheus.io/docs/prometheus/latest/querying/api/#rules) documentation.

### List alerts
