From 6d0088440deab8be7ff8d7204e73c8a1e36cbbdb Mon Sep 17 00:00:00 2001
From: bryan-aguilar <46550959+bryan-aguilar@users.noreply.github.com>
Date: Tue, 19 Sep 2023 01:29:20 -0700
Subject: [PATCH 1/9] [exporter/awsemf] Drop metrics with NaN values (#26344)

**Description:** Metrics with NaN values for float types would cause the EMF Exporter to error out during JSON marshaling. This PR introduces a change to drop metric values that contain NaN.

**Link to tracking Issue:** Fixes #26267

**Testing:** Added unit tests at several different points with varying levels of specificity. The unit tests are quite verbose, but they follow the format of the existing tests with very little refactoring.

**Documentation:** Updated the README.
---
 .chloggen/awsemf_dropnan.yaml                 |  27 ++
 exporter/awsemfexporter/README.md             |   4 +-
 exporter/awsemfexporter/datapoint.go          |  51 +++
 exporter/awsemfexporter/datapoint_test.go     | 357 +++++++++++++++++-
 exporter/awsemfexporter/emf_exporter_test.go  |  47 ++-
 exporter/awsemfexporter/grouped_metric.go     |   9 +
 .../awsemfexporter/grouped_metric_test.go     |  68 ++++
 7 files changed, 548 insertions(+), 15 deletions(-)
 create mode 100755 .chloggen/awsemf_dropnan.yaml

diff --git a/.chloggen/awsemf_dropnan.yaml b/.chloggen/awsemf_dropnan.yaml
new file mode 100755
index 000000000000..35733977d70d
--- /dev/null
+++ b/.chloggen/awsemf_dropnan.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: awsemfexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: AWS EMF Exporter will drop any metrics that contain NaN values to avoid JSON marshal errors.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [26267]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/exporter/awsemfexporter/README.md b/exporter/awsemfexporter/README.md
index 1463eca1e26a..5e9f166c9810 100644
--- a/exporter/awsemfexporter/README.md
+++ b/exporter/awsemfexporter/README.md
@@ -20,7 +20,9 @@ and then sends them directly to CloudWatch Logs using the
 [PutLogEvents](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) API.

 ## Data Conversion
-Convert OpenTelemetry ```Int64DataPoints```, ```DoubleDataPoints```, ```SummaryDataPoints``` metrics datapoints into CloudWatch ```EMF``` structured log formats and send it to CloudWatch. Logs and Metrics will be displayed in CloudWatch console. 
+Convert OpenTelemetry ```Int64DataPoints```, ```DoubleDataPoints```, ```SummaryDataPoints``` metrics datapoints into +CloudWatch ```EMF``` structured log formats and send it to CloudWatch. Logs and Metrics will be displayed in +CloudWatch console. NaN values are not supported by CloudWatch EMF and will be dropped by the exporter. ## Exporter Configuration diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 4aba2c5bba50..0ac878aaa5e5 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -61,6 +61,10 @@ type dataPoints interface { // retained: indicates whether the data point is valid for further process // NOTE: It is an expensive call as it calculates the metric value. CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool, calculators *emfCalculators) (dataPoint []dataPoint, retained bool) + // IsStaleOrNaN returns true if metric value has NoRecordedValue flag set or if any metric value contains a NaN. + // When return value is true, IsStaleOrNaN also returns the attributes attached to the metric which can be used for + // logging purposes. + IsStaleOrNaN(i int) (bool, pcommon.Map) } // deltaMetricMetadata contains the metadata required to perform rate/delta calculation @@ -145,6 +149,17 @@ func (dps numberDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationS return []dataPoint{{name: dps.metricName, value: metricVal, labels: labels, timestampMs: timestampMs}}, retained } +func (dps numberDataPointSlice) IsStaleOrNaN(i int) (bool, pcommon.Map) { + metric := dps.NumberDataPointSlice.At(i) + if metric.Flags().NoRecordedValue() { + return true, metric.Attributes() + } + if metric.ValueType() == pmetric.NumberDataPointValueTypeDouble { + return math.IsNaN(metric.DoubleValue()), metric.Attributes() + } + return false, pcommon.Map{} +} + // CalculateDeltaDatapoints retrieves the HistogramDataPoint at the given index. func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) { metric := dps.HistogramDataPointSlice.At(i) @@ -164,6 +179,17 @@ func (dps histogramDataPointSlice) CalculateDeltaDatapoints(i int, instrumentati }}, true } +func (dps histogramDataPointSlice) IsStaleOrNaN(i int) (bool, pcommon.Map) { + metric := dps.HistogramDataPointSlice.At(i) + if metric.Flags().NoRecordedValue() { + return true, metric.Attributes() + } + if math.IsNaN(metric.Max()) || math.IsNaN(metric.Sum()) || math.IsNaN(metric.Min()) { + return true, metric.Attributes() + } + return false, pcommon.Map{} +} + // CalculateDeltaDatapoints retrieves the ExponentialHistogramDataPoint at the given index. 
func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, instrumentationScopeName string, _ bool, _ *emfCalculators) ([]dataPoint, bool) { metric := dps.ExponentialHistogramDataPointSlice.At(idx) @@ -246,6 +272,20 @@ func (dps exponentialHistogramDataPointSlice) CalculateDeltaDatapoints(idx int, }}, true } +func (dps exponentialHistogramDataPointSlice) IsStaleOrNaN(i int) (bool, pcommon.Map) { + metric := dps.ExponentialHistogramDataPointSlice.At(i) + if metric.Flags().NoRecordedValue() { + return true, metric.Attributes() + } + if math.IsNaN(metric.Max()) || + math.IsNaN(metric.Min()) || + math.IsNaN(metric.Sum()) { + return true, metric.Attributes() + } + + return false, pcommon.Map{} +} + // CalculateDeltaDatapoints retrieves the SummaryDataPoint at the given index and perform calculation with sum and count while retain the quantile value. func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentationScopeName string, detailedMetrics bool, calculators *emfCalculators) ([]dataPoint, bool) { metric := dps.SummaryDataPointSlice.At(i) @@ -303,6 +343,17 @@ func (dps summaryDataPointSlice) CalculateDeltaDatapoints(i int, instrumentation return datapoints, retained } +func (dps summaryDataPointSlice) IsStaleOrNaN(i int) (bool, pcommon.Map) { + metric := dps.SummaryDataPointSlice.At(i) + if metric.Flags().NoRecordedValue() { + return true, metric.Attributes() + } + if math.IsNaN(metric.Sum()) { + return true, metric.Attributes() + } + return false, metric.Attributes() +} + // createLabels converts OTel AttributesMap attributes to a map // and optionally adds in the OTel instrumentation library name func createLabels(attributes pcommon.Map, instrLibName string) map[string]string { diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 3e4f0b46f7f3..004bcd481cb8 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -5,6 +5,7 @@ package awsemfexporter import ( "fmt" + "math" "reflect" "testing" "time" @@ -50,6 +51,22 @@ func generateTestGaugeMetric(name string, valueType metricValueType) pmetric.Met return otelMetrics } +func generateTestGaugeMetricNaN(name string) pmetric.Metrics { + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + metric := metrics.AppendEmpty() + metric.SetName(name) + metric.SetUnit("Count") + gaugeMetric := metric.SetEmptyGauge() + gaugeDatapoint := gaugeMetric.DataPoints().AppendEmpty() + gaugeDatapoint.Attributes().PutStr("label1", "value1") + + gaugeDatapoint.SetDoubleValue(math.NaN()) + + return otelMetrics +} + func generateTestSumMetric(name string, valueType metricValueType) pmetric.Metrics { otelMetrics := pmetric.NewMetrics() rs := otelMetrics.ResourceMetrics().AppendEmpty() @@ -93,6 +110,23 @@ func generateTestHistogramMetric(name string) pmetric.Metrics { return otelMetrics } +func generateTestHistogramMetricWithNaNs(name string) pmetric.Metrics { + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + metric := metrics.AppendEmpty() + metric.SetName(name) + metric.SetUnit("Seconds") + histogramMetric := metric.SetEmptyHistogram() + histogramDatapoint := histogramMetric.DataPoints().AppendEmpty() + histogramDatapoint.BucketCounts().FromRaw([]uint64{5, 6, 7}) + histogramDatapoint.ExplicitBounds().FromRaw([]float64{0, math.NaN()}) + 
histogramDatapoint.Attributes().PutStr("label1", "value1") + histogramDatapoint.SetCount(18) + histogramDatapoint.SetSum(math.NaN()) + return otelMetrics +} + func generateTestExponentialHistogramMetric(name string) pmetric.Metrics { otelMetrics := pmetric.NewMetrics() rs := otelMetrics.ResourceMetrics().AppendEmpty() @@ -121,6 +155,34 @@ func generateTestExponentialHistogramMetric(name string) pmetric.Metrics { return otelMetrics } +func generateTestExponentialHistogramMetricWithNaNs(name string) pmetric.Metrics { + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + metric := metrics.AppendEmpty() + metric.SetName(name) + metric.SetUnit("Seconds") + exponentialHistogramMetric := metric.SetEmptyExponentialHistogram() + + exponentialHistogramDatapoint := exponentialHistogramMetric.DataPoints().AppendEmpty() + exponentialHistogramDatapoint.SetCount(4) + exponentialHistogramDatapoint.SetSum(math.NaN()) + exponentialHistogramDatapoint.SetMin(math.NaN()) + exponentialHistogramDatapoint.SetMax(math.NaN()) + exponentialHistogramDatapoint.SetZeroCount(0) + exponentialHistogramDatapoint.SetScale(1) + exponentialHistogramDatapoint.Positive().SetOffset(1) + exponentialHistogramDatapoint.Positive().BucketCounts().FromRaw([]uint64{ + 1, 0, 1, + }) + exponentialHistogramDatapoint.Negative().SetOffset(1) + exponentialHistogramDatapoint.Negative().BucketCounts().FromRaw([]uint64{ + 1, 0, 1, + }) + exponentialHistogramDatapoint.Attributes().PutStr("label1", "value1") + return otelMetrics +} + func generateTestSummaryMetric(name string) pmetric.Metrics { otelMetrics := pmetric.NewMetrics() rs := otelMetrics.ResourceMetrics().AppendEmpty() @@ -146,6 +208,31 @@ func generateTestSummaryMetric(name string) pmetric.Metrics { return otelMetrics } +func generateTestSummaryMetricWithNaN(name string) pmetric.Metrics { + otelMetrics := pmetric.NewMetrics() + rs := otelMetrics.ResourceMetrics().AppendEmpty() + metrics := rs.ScopeMetrics().AppendEmpty().Metrics() + + for i := 0; i < 2; i++ { + metric := metrics.AppendEmpty() + metric.SetName(name) + metric.SetUnit("Seconds") + summaryMetric := metric.SetEmptySummary() + summaryDatapoint := summaryMetric.DataPoints().AppendEmpty() + summaryDatapoint.Attributes().PutStr("label1", "value1") + summaryDatapoint.SetCount(uint64(5 * i)) + summaryDatapoint.SetSum(math.NaN()) + firstQuantile := summaryDatapoint.QuantileValues().AppendEmpty() + firstQuantile.SetQuantile(0.0) + firstQuantile.SetValue(1) + secondQuantile := summaryDatapoint.QuantileValues().AppendEmpty() + secondQuantile.SetQuantile(100.0) + secondQuantile.SetValue(5) + } + + return otelMetrics +} + func generateOtelTestMetrics(generatedOtelMetrics ...pmetric.Metrics) pmetric.Metrics { finalOtelMetrics := pmetric.NewMetrics() rs := finalOtelMetrics.ResourceMetrics().AppendEmpty() @@ -185,6 +272,63 @@ func shutdownEmfCalculators(c *emfCalculators) error { } +func TestIsStaleOrNaN_NumberDataPointSlice(t *testing.T) { + testCases := []struct { + name string + metricName string + metricValue interface{} + expectedAssert assert.BoolAssertionFunc + setFlagsFunc func(point pmetric.NumberDataPoint) pmetric.NumberDataPoint + }{ + { + name: "nan", + metricValue: math.NaN(), + metricName: "NaN", + expectedAssert: assert.True, + }, + { + name: "valid float", + metricValue: 0.4, + metricName: "floaty mc-float-face", + expectedAssert: assert.False, + }, + { + name: "data point flag set", + metricValue: 0.4, + metricName: "floaty 
mc-float-face part two", + expectedAssert: assert.True, + setFlagsFunc: func(point pmetric.NumberDataPoint) pmetric.NumberDataPoint { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return point + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + // Given the number datapoint (including Sum and Gauge OTEL metric type) with data type as int or double + numberDPS := pmetric.NewNumberDataPointSlice() + + numberDP := numberDPS.AppendEmpty() + if tc.setFlagsFunc != nil { + tc.setFlagsFunc(numberDP) + } + + switch v := tc.metricValue.(type) { + case int64: + numberDP.SetIntValue(v) + case float64: + numberDP.SetDoubleValue(v) + } + + numberDatapointSlice := numberDataPointSlice{deltaMetricMetadata{}, numberDPS} + isStaleOrNan, _ := numberDatapointSlice.IsStaleOrNaN(0) + tc.expectedAssert(t, isStaleOrNan) + }) + } +} + func TestCalculateDeltaDatapoints_NumberDataPointSlice(t *testing.T) { emfCalcs := setupEmfCalculators() defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) @@ -289,8 +433,8 @@ func TestCalculateDeltaDatapoints_NumberDataPointSlice(t *testing.T) { numberDP.SetDoubleValue(v) } - deltaMetricMetadata := generateDeltaMetricMetadata(tc.adjustToDelta, tc.metricName, retainInitialValueOfDeltaMetric) - numberDatapointSlice := numberDataPointSlice{deltaMetricMetadata, numberDPS} + dmd := generateDeltaMetricMetadata(tc.adjustToDelta, tc.metricName, retainInitialValueOfDeltaMetric) + numberDatapointSlice := numberDataPointSlice{dmd, numberDPS} // When calculate the delta datapoints for number datapoint dps, retained := numberDatapointSlice.CalculateDeltaDatapoints(0, instrLibName, false, emfCalcs) @@ -309,7 +453,7 @@ func TestCalculateDeltaDatapoints_NumberDataPointSlice(t *testing.T) { } func TestCalculateDeltaDatapoints_HistogramDataPointSlice(t *testing.T) { - deltaMetricMetadata := generateDeltaMetricMetadata(false, "foo", false) + dmd := generateDeltaMetricMetadata(false, "foo", false) testCases := []struct { name string @@ -374,7 +518,7 @@ func TestCalculateDeltaDatapoints_HistogramDataPointSlice(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(_ *testing.T) { // Given the histogram datapoints - histogramDatapointSlice := histogramDataPointSlice{deltaMetricMetadata, tc.histogramDPS} + histogramDatapointSlice := histogramDataPointSlice{dmd, tc.histogramDPS} emfCalcs := setupEmfCalculators() defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) // When calculate the delta datapoints for histograms @@ -390,8 +534,78 @@ func TestCalculateDeltaDatapoints_HistogramDataPointSlice(t *testing.T) { } +func TestIsStaleOrNaN_HistogramDataPointSlice(t *testing.T) { + + testCases := []struct { + name string + histogramDPS pmetric.HistogramDataPointSlice + boolAssertFunc assert.BoolAssertionFunc + setFlagsFunc func(point pmetric.HistogramDataPoint) pmetric.HistogramDataPoint + }{ + { + name: "Histogram with NaNs", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := pmetric.NewHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(math.NaN()) + histogramDP.SetMin(math.NaN()) + histogramDP.SetMax(math.NaN()) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Histogram with min and max", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := pmetric.NewHistogramDataPointSlice() + histogramDP := 
histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(17.13) + histogramDP.SetMin(10) + histogramDP.SetMax(30) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.False, + }, + { + name: "Histogram with no NaNs", + histogramDPS: func() pmetric.HistogramDataPointSlice { + histogramDPS := pmetric.NewHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(17.13) + histogramDP.SetMin(10) + histogramDP.SetMax(30) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + setFlagsFunc: func(point pmetric.HistogramDataPoint) pmetric.HistogramDataPoint { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return point + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(_ *testing.T) { + // Given the histogram datapoints + histogramDatapointSlice := histogramDataPointSlice{deltaMetricMetadata{}, tc.histogramDPS} + if tc.setFlagsFunc != nil { + tc.setFlagsFunc(histogramDatapointSlice.At(0)) + } + isStaleOrNan, _ := histogramDatapointSlice.IsStaleOrNaN(0) + tc.boolAssertFunc(t, isStaleOrNan) + }) + } + +} + func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing.T) { - deltaMetricMetadata := generateDeltaMetricMetadata(false, "foo", false) + dmd := generateDeltaMetricMetadata(false, "foo", false) testCases := []struct { name string @@ -476,7 +690,7 @@ func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing. for _, tc := range testCases { t.Run(tc.name, func(_ *testing.T) { // Given the histogram datapoints - exponentialHistogramDatapointSlice := exponentialHistogramDataPointSlice{deltaMetricMetadata, tc.histogramDPS} + exponentialHistogramDatapointSlice := exponentialHistogramDataPointSlice{dmd, tc.histogramDPS} emfCalcs := setupEmfCalculators() defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) // When calculate the delta datapoints for histograms @@ -491,11 +705,82 @@ func TestCalculateDeltaDatapoints_ExponentialHistogramDataPointSlice(t *testing. 
} +func TestIsStaleOrNaN_ExponentialHistogramDataPointSlice(t *testing.T) { + + testCases := []struct { + name string + histogramDPS pmetric.ExponentialHistogramDataPointSlice + boolAssertFunc assert.BoolAssertionFunc + setFlagsFunc func(point pmetric.ExponentialHistogramDataPoint) pmetric.ExponentialHistogramDataPoint + }{ + { + name: "Exponential histogram with non NaNs", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(17.13) + histogramDP.SetMin(10) + histogramDP.SetMax(30) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.False, + }, + { + name: "Exponential histogram with NaNs", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(math.NaN()) + histogramDP.SetMin(math.NaN()) + histogramDP.SetMax(math.NaN()) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + }, + { + name: "Exponential histogram with set flag func", + histogramDPS: func() pmetric.ExponentialHistogramDataPointSlice { + histogramDPS := pmetric.NewExponentialHistogramDataPointSlice() + histogramDP := histogramDPS.AppendEmpty() + histogramDP.SetCount(uint64(17)) + histogramDP.SetSum(17.13) + histogramDP.SetMin(10) + histogramDP.SetMax(30) + histogramDP.Attributes().PutStr("label1", "value1") + return histogramDPS + }(), + boolAssertFunc: assert.True, + setFlagsFunc: func(point pmetric.ExponentialHistogramDataPoint) pmetric.ExponentialHistogramDataPoint { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return point + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(_ *testing.T) { + // Given the histogram datapoints + exponentialHistogramDatapointSlice := exponentialHistogramDataPointSlice{deltaMetricMetadata{}, tc.histogramDPS} + if tc.setFlagsFunc != nil { + tc.setFlagsFunc(exponentialHistogramDatapointSlice.At(0)) + } + isStaleOrNaN, _ := exponentialHistogramDatapointSlice.IsStaleOrNaN(0) + // When calculate the delta datapoints for histograms + tc.boolAssertFunc(t, isStaleOrNaN) + }) + } + +} + func TestCalculateDeltaDatapoints_SummaryDataPointSlice(t *testing.T) { emfCalcs := setupEmfCalculators() defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) for _, retainInitialValueOfDeltaMetric := range []bool{true, false} { - deltaMetricMetadata := generateDeltaMetricMetadata(true, "foo", retainInitialValueOfDeltaMetric) + dmd := generateDeltaMetricMetadata(true, "foo", retainInitialValueOfDeltaMetric) testCases := []struct { name string @@ -555,7 +840,7 @@ func TestCalculateDeltaDatapoints_SummaryDataPointSlice(t *testing.T) { secondQuantileValue.SetQuantile(100) secondQuantileValue.SetValue(tc.summaryMetricValue["secondQuantile"].(float64)) - summaryDatapointSlice := summaryDataPointSlice{deltaMetricMetadata, summaryDPS} + summaryDatapointSlice := summaryDataPointSlice{dmd, summaryDPS} // When calculate the delta datapoints for sum and count in summary dps, retained := summaryDatapointSlice.CalculateDeltaDatapoints(0, "", true, emfCalcs) @@ -576,6 +861,62 @@ func TestCalculateDeltaDatapoints_SummaryDataPointSlice(t *testing.T) { } } +func TestIsStaleOrNaN_SummaryDataPointSlice(t 
*testing.T) { + testCases := []struct { + name string + summaryMetricValue map[string]interface{} + expectedBoolAssert assert.BoolAssertionFunc + setFlagsFunc func(point pmetric.SummaryDataPoint) pmetric.SummaryDataPoint + }{ + { + name: "summary with no nan values", + summaryMetricValue: map[string]interface{}{"sum": float64(17.3), "count": uint64(17), "firstQuantile": float64(1), "secondQuantile": float64(5)}, + expectedBoolAssert: assert.False, + }, + { + name: "Summary with nan values", + summaryMetricValue: map[string]interface{}{"sum": math.NaN(), "count": uint64(25), "firstQuantile": math.NaN(), "secondQuantile": math.NaN()}, + expectedBoolAssert: assert.True, + }, + { + name: "Summary with set flag func", + summaryMetricValue: map[string]interface{}{"sum": math.NaN(), "count": uint64(25), "firstQuantile": math.NaN(), "secondQuantile": math.NaN()}, + expectedBoolAssert: assert.True, + setFlagsFunc: func(point pmetric.SummaryDataPoint) pmetric.SummaryDataPoint { + point.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + return point + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Given the summary datapoints with quantile 0, quantile 100, sum and count + summaryDPS := pmetric.NewSummaryDataPointSlice() + summaryDP := summaryDPS.AppendEmpty() + summaryDP.SetSum(tc.summaryMetricValue["sum"].(float64)) + summaryDP.SetCount(tc.summaryMetricValue["count"].(uint64)) + summaryDP.Attributes().PutStr("label1", "value1") + + summaryDP.QuantileValues().EnsureCapacity(2) + firstQuantileValue := summaryDP.QuantileValues().AppendEmpty() + firstQuantileValue.SetQuantile(0) + firstQuantileValue.SetValue(tc.summaryMetricValue["firstQuantile"].(float64)) + secondQuantileValue := summaryDP.QuantileValues().AppendEmpty() + secondQuantileValue.SetQuantile(100) + secondQuantileValue.SetValue(tc.summaryMetricValue["secondQuantile"].(float64)) + + summaryDatapointSlice := summaryDataPointSlice{deltaMetricMetadata{}, summaryDPS} + if tc.setFlagsFunc != nil { + tc.setFlagsFunc(summaryDatapointSlice.At(0)) + } + isStaleOrNaN, _ := summaryDatapointSlice.IsStaleOrNaN(0) + tc.expectedBoolAssert(t, isStaleOrNaN) + }) + } + +} + func TestCreateLabels(t *testing.T) { expectedLabels := map[string]string{ "a": "A", diff --git a/exporter/awsemfexporter/emf_exporter_test.go b/exporter/awsemfexporter/emf_exporter_test.go index ca42c71d5741..9d4426c2b819 100644 --- a/exporter/awsemfexporter/emf_exporter_test.go +++ b/exporter/awsemfexporter/emf_exporter_test.go @@ -6,7 +6,6 @@ package awsemfexporter import ( "context" "errors" - "os" "testing" "github.com/aws/aws-sdk-go/aws/awserr" @@ -15,6 +14,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -24,11 +24,6 @@ import ( const defaultRetryCount = 1 -func init() { - os.Setenv("AWS_ACCESS_KEY_ID", "test") - os.Setenv("AWS_SECRET_ACCESS_KEY", "test") -} - type mockPusher struct { mock.Mock } @@ -70,6 +65,46 @@ func TestConsumeMetrics(t *testing.T) { require.NoError(t, exp.shutdown(ctx)) } +func TestConsumeMetricsWithNaNValues(t *testing.T) { + tests := []struct { + testName string + generateFunc func(string) pmetric.Metrics + }{ + { + "histograme-with-nan", + generateTestHistogramMetricWithNaNs, + }, { + "gauge-with-nan", + generateTestGaugeMetricNaN, + }, { + "summary-with-nan", + 
generateTestSummaryMetricWithNaN, + }, { + "exponentialHistogram-with-nan", + generateTestExponentialHistogramMetricWithNaNs, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + factory := NewFactory() + expCfg := factory.CreateDefaultConfig().(*Config) + expCfg.Region = "us-west-2" + expCfg.MaxRetries = 0 + expCfg.OutputDestination = "stdout" + exp, err := newEmfExporter(expCfg, exportertest.NewNopCreateSettings()) + assert.Nil(t, err) + assert.NotNil(t, exp) + md := tc.generateFunc(tc.testName) + require.NoError(t, exp.pushMetricsData(ctx, md)) + require.NoError(t, exp.shutdown(ctx)) + }) + } + +} + func TestConsumeMetricsWithOutputDestination(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/exporter/awsemfexporter/grouped_metric.go b/exporter/awsemfexporter/grouped_metric.go index f26491218c0a..9c919c6f23c5 100644 --- a/exporter/awsemfexporter/grouped_metric.go +++ b/exporter/awsemfexporter/grouped_metric.go @@ -35,6 +35,15 @@ func addToGroupedMetric(pmd pmetric.Metric, groupedMetrics map[interface{}]*grou } for i := 0; i < dps.Len(); i++ { + // Drop stale or NaN metric values + if staleOrNan, attrs := dps.IsStaleOrNaN(i); staleOrNan { + if config != nil && config.logger != nil { + config.logger.Debug("dropped metric with nan value", + zap.String("metric.name", pmd.Name()), + zap.Any("metric.attributes", attrs)) + } + continue + } dps, retained := dps.CalculateDeltaDatapoints(i, metadata.instrumentationScopeName, config.DetailedMetrics, calculators) if !retained { continue diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index adb6dc114a01..275506d4eeea 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -192,6 +192,74 @@ func TestAddToGroupedMetric(t *testing.T) { } }) + t.Run("Add multiple different metrics with NaN types", func(t *testing.T) { + emfCalcs := setupEmfCalculators() + defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) + + groupedMetrics := make(map[interface{}]*groupedMetric) + generateMetrics := []pmetric.Metrics{ + generateTestGaugeMetric("int-gauge", intValueType), + generateTestGaugeMetric("double-gauge", doubleValueType), + generateTestHistogramMetric("histogram"), + generateTestSumMetric("int-sum", intValueType), + generateTestSumMetric("double-sum", doubleValueType), + generateTestSummaryMetric("summary"), + // We do not expect these to be added to the grouped metric. Metrics with NaN values should be dropped. + generateTestGaugeMetricNaN("double-gauge-nan"), + generateTestExponentialHistogramMetricWithNaNs("expo-with-nan"), + generateTestHistogramMetricWithNaNs("histo-with-nan"), + generateTestSummaryMetricWithNaN("sum-with-nan"), + } + + finalOtelMetrics := generateOtelTestMetrics(generateMetrics...) 
+ rms := finalOtelMetrics.ResourceMetrics() + ilms := rms.At(0).ScopeMetrics() + metrics := ilms.At(0).Metrics() + require.Equal(t, 14, metrics.Len(), "mock metric creation failed") + for i := 0; i < metrics.Len(); i++ { + err := addToGroupedMetric(metrics.At(i), + groupedMetrics, + generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, metrics.At(i).Type()), + true, + logger, + nil, + testCfg, + emfCalcs) + assert.Nil(t, err) + } + + assert.Equal(t, 4, len(groupedMetrics)) + for _, group := range groupedMetrics { + for metricName, metricInfo := range group.metrics { + switch metricName { + case "int-gauge", "double-gauge": + assert.Len(t, group.metrics, 2) + assert.Equal(t, "Count", metricInfo.unit) + assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, pmetric.MetricTypeGauge), group.metadata) + case "int-sum", "double-sum": + assert.Len(t, group.metrics, 2) + assert.Equal(t, "Count", metricInfo.unit) + assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, pmetric.MetricTypeSum), group.metadata) + case "histogram": + assert.Len(t, group.metrics, 1) + assert.Equal(t, "Seconds", metricInfo.unit) + assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, pmetric.MetricTypeHistogram), group.metadata) + case "summary": + assert.Len(t, group.metrics, 1) + assert.Equal(t, "Seconds", metricInfo.unit) + assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, pmetric.MetricTypeSummary), group.metadata) + default: + assert.Fail(t, fmt.Sprintf("Unhandled metric %s not expected", metricName)) + } + expectedLabels := map[string]string{ + oTellibDimensionKey: "cloudwatch-otel", + "label1": "value1", + } + assert.Equal(t, expectedLabels, group.labels) + } + } + }) + t.Run("Add same metric but different log group", func(t *testing.T) { emfCalcs := setupEmfCalculators() defer require.NoError(t, shutdownEmfCalculators(emfCalcs)) From 4496b8b2537aa799e3a6435d278070b4003206e3 Mon Sep 17 00:00:00 2001 From: Jared Tan Date: Tue, 19 Sep 2023 21:38:39 +0800 Subject: [PATCH 2/9] [exporter/loadbalancer] unexport `MetricsViews` function (#26992) **Link to tracking Issue:** https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26304 Signed-off-by: Jared Tan --- cmd/checkapi/allowlist.txt | 1 - exporter/loadbalancingexporter/factory.go | 2 +- exporter/loadbalancingexporter/metrics.go | 4 ++-- exporter/loadbalancingexporter/metrics_test.go | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/checkapi/allowlist.txt b/cmd/checkapi/allowlist.txt index 26e865594269..3f58158f1143 100644 --- a/cmd/checkapi/allowlist.txt +++ b/cmd/checkapi/allowlist.txt @@ -6,7 +6,6 @@ exporter/dynatraceexporter exporter/f5cloudexporter exporter/kafkaexporter exporter/kineticaexporter -exporter/loadbalancingexporter exporter/logzioexporter exporter/lokiexporter exporter/pulsarexporter diff --git a/exporter/loadbalancingexporter/factory.go b/exporter/loadbalancingexporter/factory.go index 52501eacd771..79d968143346 100644 --- a/exporter/loadbalancingexporter/factory.go +++ b/exporter/loadbalancingexporter/factory.go @@ -18,7 +18,7 @@ import ( // NewFactory creates a factory for the exporter. func NewFactory() exporter.Factory { - _ = view.Register(MetricViews()...) + _ = view.Register(metricViews()...) 
return exporter.NewFactory( metadata.Type, diff --git a/exporter/loadbalancingexporter/metrics.go b/exporter/loadbalancingexporter/metrics.go index 66a3d900b7f7..0bbdb4528d13 100644 --- a/exporter/loadbalancingexporter/metrics.go +++ b/exporter/loadbalancingexporter/metrics.go @@ -19,8 +19,8 @@ var ( successFalseMutator = tag.Upsert(tag.MustNewKey("success"), "false") ) -// MetricViews return the metrics views according to given telemetry level. -func MetricViews() []*view.View { +// metricViews return the metrics views according to given telemetry level. +func metricViews() []*view.View { return []*view.View{ { Name: mNumResolutions.Name(), diff --git a/exporter/loadbalancingexporter/metrics_test.go b/exporter/loadbalancingexporter/metrics_test.go index 87759d6de111..2db6ea4183fa 100644 --- a/exporter/loadbalancingexporter/metrics_test.go +++ b/exporter/loadbalancingexporter/metrics_test.go @@ -17,7 +17,7 @@ func TestProcessorMetrics(t *testing.T) { "loadbalancer_backend_latency", } - views := MetricViews() + views := metricViews() for i, viewName := range expectedViewNames { assert.Equal(t, viewName, views[i].Name) } From ee1089b51b1885843d6c45be9e24985f32467adb Mon Sep 17 00:00:00 2001 From: OpenTelemetry Bot <107717825+opentelemetrybot@users.noreply.github.com> Date: Tue, 19 Sep 2023 07:16:04 -0700 Subject: [PATCH 3/9] Add JMX metrics gatherer version 1.30.0-alpha (#26994) Add JMX metrics gatherer version `1.30.0-alpha`. cc @open-telemetry/java-contrib-approvers --- receiver/jmxreceiver/supported_jars.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/receiver/jmxreceiver/supported_jars.go b/receiver/jmxreceiver/supported_jars.go index 3a2c942e899a..af236edfef9f 100644 --- a/receiver/jmxreceiver/supported_jars.go +++ b/receiver/jmxreceiver/supported_jars.go @@ -31,6 +31,10 @@ func oldFormatProperties(c *Config, j supportedJar) error { // If you change this variable name, please open an issue in opentelemetry-java-contrib // so that repository's release automation can be updated var jmxMetricsGathererVersions = map[string]supportedJar{ + "0b4b280fa745702e83a0b3c0d191144c9069c215848c61d3092d41f000770e12": { + version: "1.30.0-alpha", + jar: "JMX metrics gatherer", + }, "0b947c255f3fd358343ab43475875dbb09233d369ff91a88a28c38f767a2a6fb": { version: "1.29.0-alpha", jar: "JMX metrics gatherer", From 10851b4646da7051fd538f11f375321fab837de7 Mon Sep 17 00:00:00 2001 From: VihasMakwana <121151420+VihasMakwana@users.noreply.github.com> Date: Tue, 19 Sep 2023 20:00:38 +0530 Subject: [PATCH 4/9] [pkg/fileconsumer] fix logging (#26706) **Description:** On a failure to open the file, the log should be at `ErrorLevel`, not at `DebugLevel`. Also, log errors in missing places. 
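For illustration (a standalone sketch, not part of this patch): zap's `SugaredLogger` has printf-style methods (`Debugf`, `Errorf`) and structured key-value methods (`Debugw`, `Errorw`). The old `m.Debugf("Failed to open file", zap.Error(err))` passed a strongly typed field to a printf template with no verb for it, so the error was rendered as a stray `%!(EXTRA ...)` argument; `Errorw` both raises the level and attaches the error as a proper structured field.

```go
package main

import (
	"os"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample().Sugar()
	defer func() { _ = logger.Sync() }()

	if _, err := os.Open("/no/such/file"); err != nil {
		// Errorw logs at ErrorLevel and records err as a structured field;
		// Debugf would have treated zap.Error(err) as a leftover printf argument.
		logger.Errorw("Failed to open file", zap.Error(err))
	}
}
```
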
**Link to tracking Issue:**
https://github.com/open-telemetry/opentelemetry-collector/issues/8442
---
 pkg/stanza/fileconsumer/file.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pkg/stanza/fileconsumer/file.go b/pkg/stanza/fileconsumer/file.go
index 5f28bf15d0e2..3c7f87062734 100644
--- a/pkg/stanza/fileconsumer/file.go
+++ b/pkg/stanza/fileconsumer/file.go
@@ -199,14 +199,14 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi
 	}
 	file, err := os.Open(path) // #nosec - operator must read in files defined by user
 	if err != nil {
-		m.Debugf("Failed to open file", zap.Error(err))
+		m.Errorw("Failed to open file", zap.Error(err))
 		return nil, nil
 	}

 	fp, err := m.readerFactory.newFingerprint(file)
 	if err != nil {
 		if err = file.Close(); err != nil {
-			m.Errorf("problem closing file %s", file.Name())
+			m.Debugw("problem closing file", zap.Error(err))
 		}
 		return nil, nil
 	}
@@ -214,7 +214,7 @@ func (m *Manager) makeFingerprint(path string) (*fingerprint.Fingerprint, *os.Fi
 	if len(fp.FirstBytes) == 0 {
 		// Empty file, don't read it until we can compare its fingerprint
 		if err = file.Close(); err != nil {
-			m.Errorf("problem closing file %s", file.Name())
+			m.Debugw("problem closing file", zap.Error(err))
 		}
 		return nil, nil
 	}
@@ -243,7 +243,7 @@ func (m *Manager) makeReader(path string) *reader {
 	// Exclude any empty fingerprints or duplicate fingerprints to avoid doubling up on copy-truncate files
 	if m.checkDuplicates(fp) {
 		if err := file.Close(); err != nil {
-			m.Errorf("problem closing file", "file", file.Name())
+			m.Debugw("problem closing file", zap.Error(err))
 		}
 		return nil
 	}

From e2e212c1b2d26c0e9b290199820b4df310b883ee Mon Sep 17 00:00:00 2001
From: Haim Rubinstein
Date: Tue, 19 Sep 2023 18:35:11 +0300
Subject: [PATCH 5/9] Feature/harubins/add multiline pattern config (#26460)

**Description:** Add a flag to the multiline config that, if set to true, will omit the pattern from the logs.

**Link to tracking Issue:**
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26381

**Testing:** Manual testing, plus unit tests in the split_test file.
---
 .../add_multiline_pattern_omit_config.yaml    |  17 ++
 pkg/stanza/docs/operators/file_input.md       |   2 +
 pkg/stanza/docs/operators/tcp_input.md        |   2 +
 pkg/stanza/docs/operators/udp_input.md        |   2 +
 pkg/stanza/split/split.go                     |  21 +-
 pkg/stanza/split/split_test.go                | 196 +++++++++++++++++-
 pkg/stanza/split/splittest/splittest.go       |   1 +
 receiver/filelogreceiver/README.md            |   2 +
 receiver/tcplogreceiver/README.md             |   2 +
 receiver/udplogreceiver/README.md             |   2 +
 10 files changed, 240 insertions(+), 7 deletions(-)
 create mode 100644 .chloggen/add_multiline_pattern_omit_config.yaml

diff --git a/.chloggen/add_multiline_pattern_omit_config.yaml b/.chloggen/add_multiline_pattern_omit_config.yaml
new file mode 100644
index 000000000000..6ff655041a20
--- /dev/null
+++ b/.chloggen/add_multiline_pattern_omit_config.yaml
@@ -0,0 +1,17 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: pkg/stanza
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add 'omit_pattern' setting to `split.Config`.
+
+# One or more tracking issues related to the change
+issues: [26381]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  This can be used to omit the start or end pattern from a log entry.
\ No newline at end of file
diff --git a/pkg/stanza/docs/operators/file_input.md b/pkg/stanza/docs/operators/file_input.md
index d039c88baf14..a2b93f56db2f 100644
--- a/pkg/stanza/docs/operators/file_input.md
+++ b/pkg/stanza/docs/operators/file_input.md
@@ -44,6 +44,8 @@ If set, the `multiline` configuration block instructs the `file_input` operator
 The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry.

+The `omit_pattern` setting can be used to omit the start/end pattern from each entry.
+
 If using multiline, last log can sometimes be not flushed due to waiting for more content. In order to forcefully flush last buffered log after certain period of time, use `force_flush_period` option.

diff --git a/pkg/stanza/docs/operators/tcp_input.md b/pkg/stanza/docs/operators/tcp_input.md
index ed690a432bcc..37197f49fcb1 100644
--- a/pkg/stanza/docs/operators/tcp_input.md
+++ b/pkg/stanza/docs/operators/tcp_input.md
@@ -39,6 +39,8 @@ If set, the `multiline` configuration block instructs the `tcp_input` operator t
 The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry.

+The `omit_pattern` setting can be used to omit the start/end pattern from each entry.
+
 #### Supported encodings

 | Key | Description
diff --git a/pkg/stanza/docs/operators/udp_input.md b/pkg/stanza/docs/operators/udp_input.md
index 8697b3f49427..4a31a02718d9 100644
--- a/pkg/stanza/docs/operators/udp_input.md
+++ b/pkg/stanza/docs/operators/udp_input.md
@@ -28,6 +28,8 @@ If set, the `multiline` configuration block instructs the `udp_input` operator t
 The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry.

+The `omit_pattern` setting can be used to omit the start/end pattern from each entry.
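As an illustration of the new behavior, a minimal standalone sketch that drives the `split` package directly (hypothetical input and pattern; `flushAtEOF` is enabled here so the trailing entry is also emitted):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/split"
	"golang.org/x/text/encoding/unicode"
)

func main() {
	cfg := split.Config{LineStartPattern: `LOGSTART \d+ `, OmitPattern: true}
	splitFunc, err := cfg.Func(unicode.UTF8, true, 0) // flushAtEOF=true, no max log size
	if err != nil {
		panic(err)
	}

	scanner := bufio.NewScanner(strings.NewReader("LOGSTART 123 log1 LOGSTART 234 log2 "))
	scanner.Split(splitFunc)
	for scanner.Scan() {
		// Prints "log1 " then "log2 ": the LOGSTART prefixes are omitted.
		fmt.Printf("%q\n", scanner.Text())
	}
}
```
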
+ #### Supported encodings | Key | Description diff --git a/pkg/stanza/split/split.go b/pkg/stanza/split/split.go index 976a70bf90f7..b5883c14841f 100644 --- a/pkg/stanza/split/split.go +++ b/pkg/stanza/split/split.go @@ -16,6 +16,7 @@ import ( type Config struct { LineStartPattern string `mapstructure:"line_start_pattern"` LineEndPattern string `mapstructure:"line_end_pattern"` + OmitPattern bool `mapstructure:"omit_pattern"` } // Func will return a bufio.SplitFunc based on the config @@ -37,20 +38,20 @@ func (c Config) Func(enc encoding.Encoding, flushAtEOF bool, maxLogSize int) (sp if err != nil { return nil, fmt.Errorf("compile line end regex: %w", err) } - splitFunc = LineEndSplitFunc(re, flushAtEOF) + splitFunc = LineEndSplitFunc(re, c.OmitPattern, flushAtEOF) case c.LineStartPattern != "": re, err := regexp.Compile("(?m)" + c.LineStartPattern) if err != nil { return nil, fmt.Errorf("compile line start regex: %w", err) } - splitFunc = LineStartSplitFunc(re, flushAtEOF) + splitFunc = LineStartSplitFunc(re, c.OmitPattern, flushAtEOF) } return splitFunc, nil } // LineStartSplitFunc creates a bufio.SplitFunc that splits an incoming stream into // tokens that start with a match to the regex pattern provided -func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { +func LineStartSplitFunc(re *regexp.Regexp, omitPattern bool, flushAtEOF bool) bufio.SplitFunc { return func(data []byte, atEOF bool) (advance int, token []byte, err error) { firstLoc := re.FindIndex(data) if firstLoc == nil { @@ -81,6 +82,10 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { // Flush if no more data is expected if atEOF && flushAtEOF { + if omitPattern { + return len(data), data[firstMatchEnd:], nil + } + return len(data), data, nil } @@ -90,6 +95,9 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { return 0, nil, nil // read more data and try again } secondMatchStart := secondLoc[0] + secondLocOfset + if omitPattern { + return secondMatchStart, data[firstMatchEnd:secondMatchStart], nil + } // start scanning at the beginning of the second match // the token begins at the first match, and ends at the beginning of the second match @@ -99,7 +107,7 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { // LineEndSplitFunc creates a bufio.SplitFunc that splits an incoming stream into // tokens that end with a match to the regex pattern provided -func LineEndSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { +func LineEndSplitFunc(re *regexp.Regexp, omitPattern bool, flushAtEOF bool) bufio.SplitFunc { return func(data []byte, atEOF bool) (advance int, token []byte, err error) { loc := re.FindIndex(data) if loc == nil { @@ -115,6 +123,11 @@ func LineEndSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { if loc[1] == len(data)-1 && !atEOF { return 0, nil, nil } + + if omitPattern { + return loc[1], data[:loc[0]], nil + } + return loc[1], data[:loc[1]], nil } } diff --git a/pkg/stanza/split/split_test.go b/pkg/stanza/split/split_test.go index 9af21034b1ca..9db4b7dbb39e 100644 --- a/pkg/stanza/split/split_test.go +++ b/pkg/stanza/split/split_test.go @@ -75,6 +75,15 @@ func TestLineStartSplitFunc(t *testing.T) { `LOGSTART 123 log1`, }, }, + { + Name: "OneLogSimpleOmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: []byte("LOGSTART 123 log1LOGSTART 123 a"), + ExpectedTokens: []string{ + `log1`, + }, + }, { Name: "TwoLogsSimple", Pattern: `LOGSTART \d+ `, @@ -84,6 +93,17 @@ 
func TestLineStartSplitFunc(t *testing.T) { `LOGSTART 234 log2 `, }, }, + { + + Name: "TwoLogsSimpleOmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: []byte(`LOGSTART 123 log1 LOGSTART 234 log2 LOGSTART 345 foo`), + ExpectedTokens: []string{ + `log1 `, + `log2 `, + }, + }, { Name: "TwoLogsLineStart", Pattern: `^LOGSTART \d+ `, @@ -93,11 +113,38 @@ func TestLineStartSplitFunc(t *testing.T) { "LOGSTART 234 log2\n", }, }, + { + Name: "TwoLogsLineStartOmitPattern", + Pattern: `^LOGSTART \d+ `, + OmitPattern: true, + Input: []byte("LOGSTART 123 LOGSTART 345 log1\nLOGSTART 234 log2\nLOGSTART 345 foo"), + ExpectedTokens: []string{ + "LOGSTART 345 log1\n", + "log2\n", + }, + }, + { + Name: "TwoLogsLineStartOmitPatternNoStringBeginningToken", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: []byte("LOGSTART 123 LOGSTART 345 log1\nLOGSTART 234 log2\nLOGSTART 345 foo"), + ExpectedTokens: []string{ + "LOGSTART 345 log1\n", + "log2\n", + }, + }, { Name: "NoMatches", Pattern: `LOGSTART \d+ `, Input: []byte(`file that has no matches in it`), }, + { + + Name: "NoMatchesOmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: []byte(`file that has no matches in it`), + }, { Name: "PrecedingNonMatches", Pattern: `LOGSTART \d+ `, @@ -107,6 +154,16 @@ func TestLineStartSplitFunc(t *testing.T) { `LOGSTART 123 part that matches`, }, }, + { + Name: "PrecedingNonMatchesOmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: []byte(`part that doesn't match LOGSTART 123 part that matchesLOGSTART 123 foo`), + ExpectedTokens: []string{ + `part that doesn't match `, + `part that matches`, + }, + }, { Name: "HugeLog100", Pattern: `LOGSTART \d+ `, @@ -120,6 +177,21 @@ func TestLineStartSplitFunc(t *testing.T) { `LOGSTART 123 ` + string(splittest.GenerateBytes(100)), }, }, + { + Name: "HugeLog100OmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + + Input: func() []byte { + newInput := []byte(`LOGSTART 123 `) + newInput = append(newInput, splittest.GenerateBytes(100)...) + newInput = append(newInput, []byte(`LOGSTART 234 endlog`)...) + return newInput + }(), + ExpectedTokens: []string{ + string(splittest.GenerateBytes(100)), + }, + }, { Name: "HugeLog10000", Pattern: `LOGSTART \d+ `, @@ -144,6 +216,18 @@ func TestLineStartSplitFunc(t *testing.T) { }(), ExpectedError: errors.New("bufio.Scanner: token too long"), }, + { + Name: "ErrTooLongOmitPattern", + Pattern: `LOGSTART \d+ `, + OmitPattern: true, + Input: func() []byte { + newInput := []byte(`LOGSTART 123 `) + newInput = append(newInput, splittest.GenerateBytes(1000000)...) + newInput = append(newInput, []byte(`LOGSTART 234 endlog`)...) 
+ return newInput + }(), + ExpectedError: errors.New("bufio.Scanner: token too long"), + }, { Name: "MultipleMultilineLogs", Pattern: `^LOGSTART \d+`, @@ -153,6 +237,23 @@ func TestLineStartSplitFunc(t *testing.T) { "LOGSTART 17 log2\nLOGPART log2\nanother line\n", }, }, + { + Name: "MultipleMultilineLogsOmitPattern", + Pattern: `^LOGSTART \d+`, + OmitPattern: true, + Input: []byte("LOGSTART 12 log1\t \nLOGPART log1\nLOGPART log1\t \nLOGSTART 17 log2\nLOGPART log2\nanother line\nLOGSTART 43 log5"), + ExpectedTokens: []string{ + " log1\t \nLOGPART log1\nLOGPART log1\t \n", + " log2\nLOGPART log2\nanother line\n", + }, + }, + { + + Name: "LogsWithoutFlusherOmitPattern", + Pattern: `^LOGSTART \d+`, + OmitPattern: true, + Input: []byte("LOGPART log1\nLOGPART log1\t \n"), + }, { Name: "NoMatch", Pattern: `^LOGSTART \d+`, @@ -161,14 +262,14 @@ func TestLineStartSplitFunc(t *testing.T) { } for _, tc := range testCases { - cfg := Config{LineStartPattern: tc.Pattern} + cfg := Config{LineStartPattern: tc.Pattern, OmitPattern: tc.OmitPattern} splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) } t.Run("FirstMatchHitsEndOfBuffer", func(t *testing.T) { - splitFunc := LineStartSplitFunc(regexp.MustCompile("LOGSTART"), false) + splitFunc := LineStartSplitFunc(regexp.MustCompile("LOGSTART"), false, false) data := []byte(`LOGSTART`) t.Run("NotAtEOF", func(t *testing.T) { @@ -255,6 +356,15 @@ func TestLineEndSplitFunc(t *testing.T) { `my log LOGEND 123`, }, }, + { + Name: "OneLogSimpleOmitPattern", + Pattern: `LOGEND \d+`, + OmitPattern: true, + Input: []byte(`my log LOGEND 123`), + ExpectedTokens: []string{ + `my log `, + }, + }, { Name: "TwoLogsSimple", Pattern: `LOGEND \d+`, @@ -264,6 +374,16 @@ func TestLineEndSplitFunc(t *testing.T) { `log2 LOGEND 234`, }, }, + { + Name: "TwoLogsSimpleOmitPattern", + Pattern: `LOGEND \d+`, + OmitPattern: true, + Input: []byte(`log1 LOGEND 123log2 LOGEND 234`), + ExpectedTokens: []string{ + `log1 `, + `log2 `, + }, + }, { Name: "TwoLogsLineEndSimple", Pattern: `LOGEND$`, @@ -273,11 +393,38 @@ func TestLineEndSplitFunc(t *testing.T) { "\nlog2 LOGEND", }, }, + { + Name: "TwoLogsLineEndSimpleOmitPattern", + Pattern: `LOGEND$`, + OmitPattern: true, + Input: []byte("log1 LOGEND LOGEND\nlog2 LOGEND\n"), + ExpectedTokens: []string{ + "log1 LOGEND ", + "\nlog2 ", + }, + }, + { + Name: "TwoLogsLineEndSimpleOmitPatternNoStringEndingToken", + Pattern: `LOGEND`, + OmitPattern: true, + Input: []byte("log1 LOGEND LOGEND\nlog2 LOGEND\n"), + ExpectedTokens: []string{ + "log1 ", + " ", + "\nlog2 ", + }, + }, { Name: "NoMatches", Pattern: `LOGEND \d+`, Input: []byte(`file that has no matches in it`), }, + { + Name: "NoMatchesOmitPattern", + OmitPattern: true, + Pattern: `LOGEND \d+`, + Input: []byte(`file that has no matches in it`), + }, { Name: "NonMatchesAfter", Pattern: `LOGEND \d+`, @@ -286,6 +433,15 @@ func TestLineEndSplitFunc(t *testing.T) { `part that matches LOGEND 123`, }, }, + { + Name: "NonMatchesAfterOmitPattern", + Pattern: `LOGEND \d+`, + OmitPattern: true, + Input: []byte(`part that matches LOGEND 123 part that doesn't match`), + ExpectedTokens: []string{ + `part that matches `, + }, + }, { Name: "HugeLog100", Pattern: `LOGEND \d`, @@ -298,6 +454,19 @@ func TestLineEndSplitFunc(t *testing.T) { string(splittest.GenerateBytes(100)) + `LOGEND 1`, }, }, + { + Name: "HugeLog100OmitPattern", + Pattern: `LOGEND \d`, + OmitPattern: true, + Input: func() []byte { + newInput := splittest.GenerateBytes(100) + newInput = 
append(newInput, []byte(`LOGEND 1 `)...) + return newInput + }(), + ExpectedTokens: []string{ + string(splittest.GenerateBytes(100)), + }, + }, { Name: "HugeLog10000", Pattern: `LOGEND \d`, @@ -320,6 +489,17 @@ func TestLineEndSplitFunc(t *testing.T) { }(), ExpectedError: errors.New("bufio.Scanner: token too long"), }, + { + Name: "HugeLog1000000OmitPattern", + Pattern: `LOGEND \d`, + OmitPattern: true, + Input: func() []byte { + newInput := splittest.GenerateBytes(1000000) + newInput = append(newInput, []byte(`LOGEND 1 `)...) + return newInput + }(), + ExpectedError: errors.New("bufio.Scanner: token too long"), + }, { Name: "MultiplesplitLogs", Pattern: `^LOGEND.*$`, @@ -329,6 +509,16 @@ func TestLineEndSplitFunc(t *testing.T) { "\nLOGSTART 17 log2\nLOGPART log2\nLOGEND log2", }, }, + { + Name: "MultipleMultilineLogsOmitPattern", + Pattern: `^LOGEND.*$`, + OmitPattern: true, + Input: []byte("LOGSTART 12 log1\t \nLOGPART log1\nLOGEND log1\t \nLOGSTART 17 log2\nLOGPART log2\nLOGEND log2\nLOGSTART 43 log5"), + ExpectedTokens: []string{ + "LOGSTART 12 log1\t \nLOGPART log1\n", + "\nLOGSTART 17 log2\nLOGPART log2\n", + }, + }, { Name: "NoMatch", Pattern: `^LOGEND.*$`, @@ -337,7 +527,7 @@ func TestLineEndSplitFunc(t *testing.T) { } for _, tc := range testCases { - cfg := Config{LineEndPattern: tc.Pattern} + cfg := Config{LineEndPattern: tc.Pattern, OmitPattern: tc.OmitPattern} splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) diff --git a/pkg/stanza/split/splittest/splittest.go b/pkg/stanza/split/splittest/splittest.go index b784b1b68dec..643d6db0a59c 100644 --- a/pkg/stanza/split/splittest/splittest.go +++ b/pkg/stanza/split/splittest/splittest.go @@ -73,6 +73,7 @@ func (r *testReader) splitFunc(split bufio.SplitFunc) bufio.SplitFunc { type TestCase struct { Name string Pattern string + OmitPattern bool Input []byte ExpectedTokens []string ExpectedError error diff --git a/receiver/filelogreceiver/README.md b/receiver/filelogreceiver/README.md index 70ccdc1e6a17..f4f730876ef2 100644 --- a/receiver/filelogreceiver/README.md +++ b/receiver/filelogreceiver/README.md @@ -74,6 +74,8 @@ If set, the `multiline` configuration block instructs the `file_input` operator The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry. +The `omit_pattern` setting can be used to omit the start/end pattern from each entry. + ### Supported encodings | Key | Description diff --git a/receiver/tcplogreceiver/README.md b/receiver/tcplogreceiver/README.md index d1e7a70b6d46..359a32764691 100644 --- a/receiver/tcplogreceiver/README.md +++ b/receiver/tcplogreceiver/README.md @@ -64,6 +64,8 @@ If set, the `multiline` configuration block instructs the `tcplog` receiver to s The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry. +The `omit_pattern` setting can be used to omit the start/end pattern from each entry. 
+ #### Supported encodings | Key | Description diff --git a/receiver/udplogreceiver/README.md b/receiver/udplogreceiver/README.md index e5b0041d956d..cfe8d7ee2d00 100644 --- a/receiver/udplogreceiver/README.md +++ b/receiver/udplogreceiver/README.md @@ -52,6 +52,8 @@ If set, the `multiline` configuration block instructs the `udplog` receiver to s The `multiline` configuration block must contain exactly one of `line_start_pattern` or `line_end_pattern`. These are regex patterns that match either the beginning of a new log entry, or the end of a log entry. +The `omit_pattern` setting can be used to omit the start/end pattern from each entry. + ### Supported encodings | Key | Description From aa00fb581f016f8e425bac74fbea9ccefc45f91e Mon Sep 17 00:00:00 2001 From: Evan Bradley <11745660+evan-bradley@users.noreply.github.com> Date: Tue, 19 Sep 2023 12:12:47 -0400 Subject: [PATCH 6/9] [cmd/opampsupervisor] Upgrade opamp-go to v0.8.0 (#27000) **Description:** Upgrade opamp-go to v0.8.0. Should not have any functional impact. --- cmd/opampsupervisor/go.mod | 2 +- cmd/opampsupervisor/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/opampsupervisor/go.mod b/cmd/opampsupervisor/go.mod index 557819fcf8b3..40b246d08e0b 100644 --- a/cmd/opampsupervisor/go.mod +++ b/cmd/opampsupervisor/go.mod @@ -9,7 +9,7 @@ require ( github.com/knadh/koanf/providers/rawbytes v0.1.0 github.com/knadh/koanf/v2 v2.0.1 github.com/oklog/ulid/v2 v2.1.0 - github.com/open-telemetry/opamp-go v0.6.0 + github.com/open-telemetry/opamp-go v0.8.0 go.opentelemetry.io/collector/config/configtls v0.85.0 go.uber.org/zap v1.26.0 ) diff --git a/cmd/opampsupervisor/go.sum b/cmd/opampsupervisor/go.sum index d08d60f0b23f..74d6ab0d3bbf 100644 --- a/cmd/opampsupervisor/go.sum +++ b/cmd/opampsupervisor/go.sum @@ -29,8 +29,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU= github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ= -github.com/open-telemetry/opamp-go v0.6.0 h1:pTnVvIp9FT3qEQ8P+evENhvaQGkF8i6vu73gawBPRLQ= -github.com/open-telemetry/opamp-go v0.6.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M= +github.com/open-telemetry/opamp-go v0.8.0 h1:ub2j96T3GzxCD5R+VDtN6iPUv4k2jgdyISi1d1BZ89I= +github.com/open-telemetry/opamp-go v0.8.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= From 2d45ad6812303cabec721a77ec35e75cf5f9d6e3 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Tue, 19 Sep 2023 10:13:01 -0700 Subject: [PATCH 7/9] [receiver/awscontainerinsights] HOST_PROC usage (#26477) Remove the need to set the environment variable HOST_PROC as part of the awscontainerinsightsreceiver #24777 --- ...e-envmap-awscontainerinsightsreceiver.yaml | 27 ++++++++++++++++ .../internal/host/nodeCapacity.go | 32 ++++++++----------- .../internal/host/nodeCapacity_test.go | 19 ++++------- 3 files changed, 47 insertions(+), 31 deletions(-) create mode 100644 .chloggen/use-envmap-awscontainerinsightsreceiver.yaml diff --git a/.chloggen/use-envmap-awscontainerinsightsreceiver.yaml 
b/.chloggen/use-envmap-awscontainerinsightsreceiver.yaml new file mode 100644 index 000000000000..539732084bd0 --- /dev/null +++ b/.chloggen/use-envmap-awscontainerinsightsreceiver.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: receiver/awscontainerinsightsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove the need to set an env var in the receiver to get CPU and memory info + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [24777] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go index 7e801626f552..8c77d4634831 100644 --- a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go +++ b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity.go @@ -4,18 +4,15 @@ package host // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/host" import ( - "fmt" + "context" "os" + "github.com/shirou/gopsutil/v3/common" "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/mem" "go.uber.org/zap" ) -const ( - goPSUtilProcDirEnv = "HOST_PROC" -) - type nodeCapacityProvider interface { getMemoryCapacity() int64 getNumCores() int64 @@ -30,8 +27,8 @@ type nodeCapacity struct { osLstat func(name string) (os.FileInfo, error) // osSetenv sets the value of the environment variable named by the key osSetenv func(key string, value string) error - virtualMemory func() (*mem.VirtualMemoryStat, error) - cpuInfo func() ([]cpu.InfoStat, error) + virtualMemory func(ctx context.Context) (*mem.VirtualMemoryStat, error) + cpuInfo func(ctx context.Context) ([]cpu.InfoStat, error) } type nodeCapacityOption func(*nodeCapacity) @@ -41,8 +38,8 @@ func newNodeCapacity(logger *zap.Logger, options ...nodeCapacityOption) (nodeCap logger: logger, osLstat: os.Lstat, osSetenv: os.Setenv, - virtualMemory: mem.VirtualMemory, - cpuInfo: cpu.Info, + virtualMemory: mem.VirtualMemoryWithContext, + cpuInfo: cpu.InfoWithContext, } for _, opt := range options { @@ -52,17 +49,16 @@ func newNodeCapacity(logger *zap.Logger, options ...nodeCapacityOption) (nodeCap if _, err := nc.osLstat(hostProc); os.IsNotExist(err) { return nil, err } - if err := nc.osSetenv(goPSUtilProcDirEnv, hostProc); err != nil { - return nil, fmt.Errorf("NodeCapacity cannot set goPSUtilProcDirEnv to %s: %w", hostProc, err) - } + envMap := common.EnvMap{common.HostProcEnvKey: hostProc} + ctx 
:= context.WithValue(context.Background(), common.EnvKey, envMap) - nc.parseCPU() - nc.parseMemory() + nc.parseCPU(ctx) + nc.parseMemory(ctx) return nc, nil } -func (nc *nodeCapacity) parseMemory() { - if memStats, err := nc.virtualMemory(); err == nil { +func (nc *nodeCapacity) parseMemory(ctx context.Context) { + if memStats, err := nc.virtualMemory(ctx); err == nil { nc.memCapacity = int64(memStats.Total) } else { // If any error happen, then there will be no mem utilization metrics @@ -70,8 +66,8 @@ func (nc *nodeCapacity) parseMemory() { } } -func (nc *nodeCapacity) parseCPU() { - if cpuInfos, err := nc.cpuInfo(); err == nil { +func (nc *nodeCapacity) parseCPU(ctx context.Context) { + if cpuInfos, err := nc.cpuInfo(ctx); err == nil { numCores := len(cpuInfos) nc.cpuCapacity = int64(numCores) } else { diff --git a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go index e04d7c20e86d..fc903775f0f1 100644 --- a/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go +++ b/receiver/awscontainerinsightreceiver/internal/host/nodeCapacity_test.go @@ -4,6 +4,7 @@ package host import ( + "context" "errors" "os" "testing" @@ -31,28 +32,20 @@ func TestNodeCapacity(t *testing.T) { return nil, nil } } - setEnvOption := func(nc *nodeCapacity) { - nc.osSetenv = func(key, value string) error { - return errors.New("error") - } - } - nc, err = newNodeCapacity(zap.NewNop(), lstatOption, setEnvOption) - assert.Nil(t, nc) - assert.NotNil(t, err) // can't parse cpu and mem info - setEnvOption = func(nc *nodeCapacity) { + setEnvOption := func(nc *nodeCapacity) { nc.osSetenv = func(key, value string) error { return nil } } virtualMemOption := func(nc *nodeCapacity) { - nc.virtualMemory = func() (*mem.VirtualMemoryStat, error) { + nc.virtualMemory = func(ctx context.Context) (*mem.VirtualMemoryStat, error) { return nil, errors.New("error") } } cpuInfoOption := func(nc *nodeCapacity) { - nc.cpuInfo = func() ([]cpu.InfoStat, error) { + nc.cpuInfo = func(ctx context.Context) ([]cpu.InfoStat, error) { return nil, errors.New("error") } } @@ -64,14 +57,14 @@ func TestNodeCapacity(t *testing.T) { // normal case where everything is working virtualMemOption = func(nc *nodeCapacity) { - nc.virtualMemory = func() (*mem.VirtualMemoryStat, error) { + nc.virtualMemory = func(ctx context.Context) (*mem.VirtualMemoryStat, error) { return &mem.VirtualMemoryStat{ Total: 1024, }, nil } } cpuInfoOption = func(nc *nodeCapacity) { - nc.cpuInfo = func() ([]cpu.InfoStat, error) { + nc.cpuInfo = func(ctx context.Context) ([]cpu.InfoStat, error) { return []cpu.InfoStat{ {}, {}, From 76687fb24508a44acc0db1bf42b9ee5efa05934a Mon Sep 17 00:00:00 2001 From: Harry Date: Tue, 19 Sep 2023 13:07:13 -0700 Subject: [PATCH 8/9] [exporter/awsxray] Refactor fault/error logic (#26681) **Description:** This PR just refactors the logic in the awsxrayexporter as it was hard to follow the logical flow of the code. No functional changes. 
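(Reviewer note: the behavior the refactor preserves boils down to a single classification over the HTTP status code, falling back to the span status when no usable code is present. A standalone sketch of those rules follows — the `classify` helper and its boolean-return shape are illustrative, not the exporter's actual signature.)

```go
// classify mirrors the rules in the diff below: 4xx maps to an X-Ray error
// (429 additionally a throttle), 5xx maps to a fault, and when there is no
// usable HTTP code the span status alone decides whether this is a fault.
func classify(hasHTTPCode bool, code int64, spanStatusIsError bool) (isError, isThrottle, isFault bool) {
	switch {
	case !hasHTTPCode || code < 400 || code > 599:
		isFault = spanStatusIsError
	case code <= 499: // 400–499, since the first case excluded codes below 400
		isError = true
		isThrottle = code == 429
	default: // 500–599
		isFault = true
	}
	return isError, isThrottle, isFault
}
```

Note that a 429 is flagged as both an error and a throttle, matching the pre-refactor behavior.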
--- .../internal/translator/cause.go | 48 ++++++++----------- 1 file changed, 19 insertions(+), 29 deletions(-) diff --git a/exporter/awsxrayexporter/internal/translator/cause.go b/exporter/awsxrayexporter/internal/translator/cause.go index b5d477a0e282..1e99edd6a4ec 100644 --- a/exporter/awsxrayexporter/internal/translator/cause.go +++ b/exporter/awsxrayexporter/internal/translator/cause.go @@ -120,39 +120,29 @@ func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource p val, ok := span.Attributes().Get(conventions.AttributeHTTPStatusCode) - switch { // The segment status for http spans will be based on their http.statuscode as we found some http // spans does not fill with status.Code() but always filled with http.statuscode - case ok: - code := val.Int() - // We only differentiate between faults (server errors) and errors (client errors) for HTTP spans. - switch { - case code >= 400 && code <= 499: - isError = true - isFault = false - if code == 429 { - isThrottle = true - } - case code >= 500 && code <= 599: - isError = false - isThrottle = false - isFault = true - case status.Code() == ptrace.StatusCodeError: - isError = false - isThrottle = false + var code int64 + if ok { + code = val.Int() + } + + // Default values + isThrottle = false + isError = false + isFault = false + + switch { + case !ok || code < 400 || code > 599: + if status.Code() == ptrace.StatusCodeError { isFault = true - default: - isError = false - isThrottle = false - isFault = false } - case status.Code() != ptrace.StatusCodeError: - isError = false - isThrottle = false - isFault = false - default: - isError = false - isThrottle = false + case code >= 400 && code <= 499: + isError = true + if code == 429 { + isThrottle = true + } + case code >= 500 && code <= 599: isFault = true } From 12760ab56d0724b4cec8315c2c6329a76c9176df Mon Sep 17 00:00:00 2001 From: bryan-aguilar <46550959+bryan-aguilar@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:10:57 -0700 Subject: [PATCH 9/9] [chore] re-add myself as awsemfexporter codeowner. (#27009) **Description:** Original merged approval PR: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26100 PR that inadvertently removed me: https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26556 --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a1e0f268eedb..49b7b1816d14 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -42,7 +42,7 @@ examples/demo/ @open-te exporter/alibabacloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @shabicheng @kongluoxing @qiansheng91 exporter/awscloudwatchlogsexporter/ @open-telemetry/collector-contrib-approvers @boostchicken @bryan-aguilar @rapphil -exporter/awsemfexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @shaochengwang @mxiamxia +exporter/awsemfexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @shaochengwang @mxiamxia @bryan-aguilar exporter/awskinesisexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @MovieStoreGuy exporter/awss3exporter/ @open-telemetry/collector-contrib-approvers @atoulme @pdelewski exporter/awsxrayexporter/ @open-telemetry/collector-contrib-approvers @wangzlei @srprash