diff --git a/.chloggen/add-units-redisreceiver.yaml b/.chloggen/add-units-redisreceiver.yaml
new file mode 100755
index 000000000000..3128f67d243a
--- /dev/null
+++ b/.chloggen/add-units-redisreceiver.yaml
@@ -0,0 +1,25 @@
+# Use this changelog template to create an entry for release notes.
+# If your change doesn't affect end users, such as a test fix or a tooling change,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: redisreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Updates metric unit from no unit to Bytes.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [23454]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  Affected metrics can be found below.
+  - redis.clients.max_input_buffer
+  - redis.clients.max_output_buffer
+  - redis.replication.backlog_first_byte_offset
+  - redis.replication.offset
diff --git a/receiver/redisreceiver/documentation.md b/receiver/redisreceiver/documentation.md
index 85e0345fcae6..1c2f1c8841c2 100644
--- a/receiver/redisreceiver/documentation.md
+++ b/receiver/redisreceiver/documentation.md
@@ -34,7 +34,7 @@ Biggest input buffer among current client connections
 
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
-| | Gauge | Int |
+| By | Gauge | Int |
 
 ### redis.clients.max_output_buffer
 
@@ -42,7 +42,7 @@ Longest output list among current client connections
 
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
-| | Gauge | Int |
+| By | Gauge | Int |
 
 ### redis.commands
 
@@ -242,7 +242,7 @@ The master offset of the replication backlog buffer
 
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
-| | Gauge | Int |
+| By | Gauge | Int |
 
 ### redis.replication.offset
 
@@ -250,7 +250,7 @@ The server's current replication offset
 
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
-| | Gauge | Int |
+| By | Gauge | Int |
 
 ### redis.slaves.connected
 
diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics.go b/receiver/redisreceiver/internal/metadata/generated_metrics.go
index e927948bc12c..42993129cc9d 100644
--- a/receiver/redisreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/redisreceiver/internal/metadata/generated_metrics.go
@@ -191,7 +191,7 @@ type metricRedisClientsMaxInputBuffer struct {
 func (m *metricRedisClientsMaxInputBuffer) init() {
 	m.data.SetName("redis.clients.max_input_buffer")
 	m.data.SetDescription("Biggest input buffer among current client connections")
-	m.data.SetUnit("")
+	m.data.SetUnit("By")
 	m.data.SetEmptyGauge()
 }
 
@@ -240,7 +240,7 @@ type metricRedisClientsMaxOutputBuffer struct {
 func (m *metricRedisClientsMaxOutputBuffer) init() {
 	m.data.SetName("redis.clients.max_output_buffer")
 	m.data.SetDescription("Longest output list among current client connections")
-	m.data.SetUnit("")
+	m.data.SetUnit("By")
 	m.data.SetEmptyGauge()
 }
 
@@ -1503,7 +1503,7 @@ type metricRedisReplicationBacklogFirstByteOffset struct {
 func (m *metricRedisReplicationBacklogFirstByteOffset) init() {
 	m.data.SetName("redis.replication.backlog_first_byte_offset")
 	m.data.SetDescription("The master offset of the replication backlog buffer")
-	m.data.SetUnit("")
+	m.data.SetUnit("By")
 	m.data.SetEmptyGauge()
 }
 
@@ -1552,7 +1552,7 @@ type metricRedisReplicationOffset struct {
 func (m *metricRedisReplicationOffset) init() {
 	m.data.SetName("redis.replication.offset")
 	m.data.SetDescription("The server's current replication offset")
-	m.data.SetUnit("")
+	m.data.SetUnit("By")
 	m.data.SetEmptyGauge()
 }
 
diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go
index 7e6a4e3b442f..aedd7ffc620a 100644
--- a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go
@@ -248,7 +248,7 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
 					assert.Equal(t, "Biggest input buffer among current client connections", ms.At(i).Description())
-					assert.Equal(t, "", ms.At(i).Unit())
+					assert.Equal(t, "By", ms.At(i).Unit())
 					dp := ms.At(i).Gauge().DataPoints().At(0)
 					assert.Equal(t, start, dp.StartTimestamp())
 					assert.Equal(t, ts, dp.Timestamp())
@@ -260,7 +260,7 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
 					assert.Equal(t, "Longest output list among current client connections", ms.At(i).Description())
-					assert.Equal(t, "", ms.At(i).Unit())
+					assert.Equal(t, "By", ms.At(i).Unit())
 					dp := ms.At(i).Gauge().DataPoints().At(0)
 					assert.Equal(t, start, dp.StartTimestamp())
 					assert.Equal(t, ts, dp.Timestamp())
@@ -604,7 +604,7 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
 					assert.Equal(t, "The master offset of the replication backlog buffer", ms.At(i).Description())
-					assert.Equal(t, "", ms.At(i).Unit())
+					assert.Equal(t, "By", ms.At(i).Unit())
 					dp := ms.At(i).Gauge().DataPoints().At(0)
 					assert.Equal(t, start, dp.StartTimestamp())
 					assert.Equal(t, ts, dp.Timestamp())
@@ -616,7 +616,7 @@ func TestMetricsBuilder(t *testing.T) {
 					assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 					assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
 					assert.Equal(t, "The server's current replication offset", ms.At(i).Description())
-					assert.Equal(t, "", ms.At(i).Unit())
+					assert.Equal(t, "By", ms.At(i).Unit())
 					dp := ms.At(i).Gauge().DataPoints().At(0)
 					assert.Equal(t, start, dp.StartTimestamp())
 					assert.Equal(t, ts, dp.Timestamp())
diff --git a/receiver/redisreceiver/metadata.yaml b/receiver/redisreceiver/metadata.yaml
index 8782bd78f06b..a25e3eb2b004 100644
--- a/receiver/redisreceiver/metadata.yaml
+++ b/receiver/redisreceiver/metadata.yaml
@@ -112,14 +112,14 @@ metrics:
   redis.clients.max_input_buffer:
     enabled: true
     description: Biggest input buffer among current client connections
-    unit: ""
+    unit: "By"
     gauge:
       value_type: int
 
   redis.clients.max_output_buffer:
     enabled: true
     description: Longest output list among current client connections
-    unit: ""
+    unit: "By"
     gauge:
       value_type: int
 
@@ -284,14 +284,14 @@ metrics:
   redis.replication.backlog_first_byte_offset:
     enabled: true
     description: The master offset of the replication backlog buffer
-    unit: ""
+    unit: "By"
     gauge:
       value_type: int
 
   redis.replication.offset:
     enabled: true
     description: The server's current replication offset
-    unit: ""
+    unit: "By"
     gauge:
       value_type: int
 
diff --git a/receiver/redisreceiver/testdata/integration/expected.yaml b/receiver/redisreceiver/testdata/integration/expected.yaml
index 5553ff6408e1..e201566894d9 100644
--- a/receiver/redisreceiver/testdata/integration/expected.yaml
+++ b/receiver/redisreceiver/testdata/integration/expected.yaml
@@ -12,36 +12,38 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
           - description: Number of client connections (excluding connections from replicas)
             name: redis.clients.connected
             sum:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "1"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
           - description: Biggest input buffer among current client connections
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.clients.max_input_buffer
+            unit: By
           - description: Longest output list among current client connections
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.clients.max_output_buffer
+            unit: By
           - description: Number of commands processed per second
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.commands
             unit: '{ops}/s'
           - description: Total number of commands processed by the server
@@ -50,8 +52,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Total number of connections accepted by the server
             name: redis.connections.received
@@ -59,8 +61,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "2"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Number of connections rejected because of maxclients limit
             name: redis.connections.rejected
@@ -68,42 +70,42 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: System CPU consumed by the Redis server in seconds since server start
             name: redis.cpu.time
             sum:
               aggregationTemporality: 2
               dataPoints:
-                - asDouble: 0.028353
+                - asDouble: 0.022172
                   attributes:
                     - key: state
                       value:
                         stringValue: sys
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
-                - asDouble: 0.003908
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
+                - asDouble: 0.00207
                   attributes:
                     - key: state
                       value:
                         stringValue: sys_children
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
-                - asDouble: 0.02064
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
+                - asDouble: 0.021117
                   attributes:
                     - key: state
                       value:
                         stringValue: user
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
-                - asDouble: 0
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
+                - asDouble: 0.001391
                   attributes:
                     - key: state
                       value:
                         stringValue: user_children
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
             unit: s
           - description: Number of evicted keys due to maxmemory limit
@@ -112,8 +114,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Total number of key expiration events
             name: redis.keys.expired
@@ -121,8 +123,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Number of successful lookup of keys in the main dictionary
             name: redis.keyspace.hits
@@ -130,8 +132,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Number of failed lookup of keys in the main dictionary
             name: redis.keyspace.misses
@@ -139,54 +141,54 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
           - description: Duration of the latest fork operation in microseconds
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.latest_fork
             unit: us
           - description: Ratio between used_memory_rss and used_memory
             gauge:
               dataPoints:
-                - asDouble: 8.36
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                - asDouble: 9.6
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.memory.fragmentation_ratio
           - description: Number of bytes used by the Lua engine
             gauge:
               dataPoints:
                 - asInt: "37888"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.memory.lua
             unit: By
           - description: Peak memory consumed by Redis (in bytes)
             gauge:
               dataPoints:
-                - asInt: "899120"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                - asInt: "866352"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.memory.peak
             unit: By
           - description: Number of bytes that Redis allocated as seen by the operating system
             gauge:
               dataPoints:
-                - asInt: "6987776"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                - asInt: "7708672"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.memory.rss
             unit: By
           - description: Total number of bytes allocated by Redis using its allocator
             gauge:
               dataPoints:
-                - asInt: "899120"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                - asInt: "866352"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.memory.used
             unit: By
           - description: The total number of bytes read from the network
@@ -195,8 +197,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "23"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
             unit: By
           - description: The total number of bytes written to the network
@@ -205,8 +207,8 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
             unit: By
           - description: Number of changes since the last dump
@@ -215,38 +217,40 @@ resourceMetrics:
               aggregationTemporality: 2
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
           - description: The master offset of the replication backlog buffer
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.replication.backlog_first_byte_offset
+            unit: By
           - description: The server's current replication offset
             gauge:
               dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
             name: redis.replication.offset
+            unit: By
           - description: Number of connected replicas
             name: redis.slaves.connected
             sum:
               aggregationTemporality: 2
              dataPoints:
                 - asInt: "0"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
           - description: Number of seconds since Redis server start
             name: redis.uptime
             sum:
               aggregationTemporality: 2
               dataPoints:
-                - asInt: "10"
-                  startTimeUnixNano: "1684788673549655000"
-                  timeUnixNano: "1684788683549655000"
+                - asInt: "1"
+                  startTimeUnixNano: "1687203706819685000"
+                  timeUnixNano: "1687203707819685000"
               isMonotonic: true
             unit: s
         scope: