diff --git a/.chloggen/feat_contexts-take-time.yaml b/.chloggen/feat_contexts-take-time.yaml new file mode 100755 index 000000000000..3d8e12919537 --- /dev/null +++ b/.chloggen/feat_contexts-take-time.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: 'pkg/ottl' + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Update contexts to set and get time.Time" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [22010] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/.chloggen/ottl-structtag-cleanup.yaml b/.chloggen/ottl-structtag-cleanup.yaml new file mode 100755 index 000000000000..8e62c5845f7c --- /dev/null +++ b/.chloggen/ottl-structtag-cleanup.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove support for `ottlarg`. The struct's field order is now the function parameter order. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [25705] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/pkg-stanza-extract-trim-split.yaml b/.chloggen/pkg-stanza-extract-trim-split.yaml new file mode 100755 index 000000000000..651ae0e0b097 --- /dev/null +++ b/.chloggen/pkg-stanza-extract-trim-split.yaml @@ -0,0 +1,30 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/stanza + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). 
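Not part of the diff, just a minimal sketch of what the `ottlarg` removal noted above means for a function's `Arguments` struct. The `myFuncArguments` type and its fields are hypothetical; the pattern mirrors the real struct-tag removals later in this diff (e.g. `ConvertCaseArguments`).

```go
package example

import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"

// Before this change: explicit `ottlarg` tags declared each parameter's index.
type myFuncArgumentsOld[K any] struct {
	Target ottl.StringGetter[K] `ottlarg:"0"`
	Suffix string               `ottlarg:"1"`
}

// After this change: no tags. The first field is the function's first
// parameter, the second field its second, and so on, so reordering fields
// changes the call signature (here, a hypothetical MyFunc(target, suffix)).
type myFuncArguments[K any] struct {
	Target ottl.StringGetter[K]
	Suffix string
}
```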
+note: Make trim func composable + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [26536] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + - Adds trim.WithFunc to allow trim funcs to wrap bufio.SplitFuncs. + - Removes trim.Func from split.Config.Func. Use trim.WithFunc instead. + - Removes trim.Func from flush.WithPeriod. Use trim.WithFunc instead. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [api] diff --git a/.chloggen/redisreceiver_cmd_latency.yaml b/.chloggen/redisreceiver_cmd_latency.yaml new file mode 100755 index 000000000000..6e2d632bdc9b --- /dev/null +++ b/.chloggen/redisreceiver_cmd_latency.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: redisreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added `redis.cmd.latency` metric. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [6942] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/splunkent-client.yaml b/.chloggen/splunkent-client.yaml new file mode 100755 index 000000000000..e508362cc1bd --- /dev/null +++ b/.chloggen/splunkent-client.yaml @@ -0,0 +1,20 @@ +# Use this changelog template to create an entry for release notes. +# If your change doesn't affect end users, such as a test fix or a tooling change, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: splunkentreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "adding component logic to splunkenterprise receiver" + +# Mandatory: One or more tracking issues related to the change. 
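For illustration only: the trim/split composition described in the `pkg/stanza` subtext above, sketched with a local wrapper because the exact `trim.WithFunc` signature is not shown in this diff and is assumed here.

```go
package example

import (
	"bufio"
	"bytes"
)

// trimFunc mirrors the shape of pkg/stanza's trim.Func: it post-processes a token.
type trimFunc func([]byte) []byte

// withTrim composes a trim function with any bufio.SplitFunc, which is the
// composability the new trim.WithFunc helper provides (signature assumed).
func withTrim(split bufio.SplitFunc, trim trimFunc) bufio.SplitFunc {
	return func(data []byte, atEOF bool) (advance int, token []byte, err error) {
		advance, token, err = split(data, atEOF)
		if token != nil {
			token = trim(token)
		}
		return advance, token, err
	}
}

// Wrap line splitting with whitespace trimming, rather than passing a trim
// func through split.Config.Func or flush.WithPeriod as before.
var trimmedLines = withTrim(bufio.ScanLines, bytes.TrimSpace)
```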
You can use the PR number here if no issue exists. +issues: [12667] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: diff --git a/cmd/checkapi/allowlist.txt b/cmd/checkapi/allowlist.txt index 5fdd4c40dd88..516f9cbc08e2 100644 --- a/cmd/checkapi/allowlist.txt +++ b/cmd/checkapi/allowlist.txt @@ -13,7 +13,6 @@ exporter/sentryexporter exporter/sumologicexporter extension/observer/ecsobserver extension/observer -extension/observer/k8sobserver processor/groupbyattrsprocessor processor/groupbytraceprocessor processor/probabilisticsamplerprocessor diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 03686e6218f5..a78e63371511 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -267,7 +267,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/aliyun/aliyun-log-go-sdk v0.1.54 // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect github.com/apache/pulsar-client-go v0.8.1 // indirect github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index 48520302a04f..893c5b3ff529 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -1018,8 +1018,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 854c4edf4088..ae9f57648abc 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -288,7 +288,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/aliyun/aliyun-log-go-sdk v0.1.54 // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect github.com/apache/pulsar-client-go v0.8.1 // indirect github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index f227649a8946..fc579efef1ec 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -962,8 +962,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod 
h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= diff --git a/cmd/oteltestbedcol/go.mod b/cmd/oteltestbedcol/go.mod index 374f8e4cdd32..4419009f0580 100644 --- a/cmd/oteltestbedcol/go.mod +++ b/cmd/oteltestbedcol/go.mod @@ -69,7 +69,7 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/alecthomas/participle/v2 v2.1.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go v1.45.7 // indirect diff --git a/cmd/oteltestbedcol/go.sum b/cmd/oteltestbedcol/go.sum index a9d271349875..bf5b1b9923f2 100644 --- a/cmd/oteltestbedcol/go.sum +++ b/cmd/oteltestbedcol/go.sum @@ -824,8 +824,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= diff --git a/cmd/telemetrygen/internal/common/config.go b/cmd/telemetrygen/internal/common/config.go index 9850b3cce754..014a1374450f 100644 --- a/cmd/telemetrygen/internal/common/config.go +++ b/cmd/telemetrygen/internal/common/config.go @@ -83,11 +83,13 @@ func (c *Config) CommonFlags(fs *pflag.FlagSet) { // custom headers c.Headers = make(map[string]string) - fs.Var(&c.Headers, "otlp-header", "Custom header to be passed along with each OTLP request. The value is expected in the format key=value."+ + fs.Var(&c.Headers, "otlp-header", "Custom header to be passed along with each OTLP request. 
The value is expected in the format key=\"value\"."+ + "Note you may need to escape the quotes when using the tool from a cli."+ "Flag may be repeated to set multiple headers (e.g -otlp-header key1=value1 -otlp-header key2=value2)") // custom resource attributes c.ResourceAttributes = make(map[string]string) fs.Var(&c.ResourceAttributes, "otlp-attributes", "Custom resource attributes to use. The value is expected in the format key=\"value\"."+ + "Note you may need to escape the quotes when using the tool from a cli."+ "Flag may be repeated to set multiple attributes (e.g -otlp-attributes key1=\"value1\" -otlp-attributes key2=\"value2\")") } diff --git a/examples/demo/.env b/examples/demo/.env index 5935660c2b80..84eac1c06ebc 100644 --- a/examples/demo/.env +++ b/examples/demo/.env @@ -1,2 +1,2 @@ -OTELCOL_IMG=otel/opentelemetry-collector:0.67.0 +OTELCOL_IMG=otel/opentelemetry-collector:0.85.0 OTELCOL_ARGS= diff --git a/examples/demo/README.md b/examples/demo/README.md index 206ef75a3086..0ed64643e40e 100644 --- a/examples/demo/README.md +++ b/examples/demo/README.md @@ -24,7 +24,7 @@ This demo uses `docker-compose` and by default runs against the to the `examples/demo` folder and run: ```shell -docker-compose up -d +docker compose up -d ``` The demo exposes the following backends: diff --git a/examples/demo/otel-collector-config.yaml b/examples/demo/otel-collector-config.yaml index 38f9415203f6..b6a9ea1ec59d 100644 --- a/examples/demo/otel-collector-config.yaml +++ b/examples/demo/otel-collector-config.yaml @@ -15,8 +15,8 @@ exporters: endpoint: "http://zipkin-all-in-one:9411/api/v2/spans" format: proto - jaeger: - endpoint: jaeger-all-in-one:14250 + otlp: + endpoint: jaeger-all-in-one:4317 tls: insecure: true @@ -36,7 +36,7 @@ service: traces: receivers: [otlp] processors: [batch] - exporters: [logging, zipkin, jaeger] + exporters: [logging, zipkin, otlp] metrics: receivers: [otlp] processors: [batch] diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 8fabe006d0d4..bab4b2b9e98e 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -66,7 +66,7 @@ require ( github.com/DataDog/zstd v1.5.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 16c8cb1f12a3..f60e4373644e 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -108,8 +108,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= diff --git a/extension/observer/k8sobserver/k8s_fixtures_test.go b/extension/observer/k8sobserver/k8s_fixtures_test.go index ad6a8990bca0..10153c4d6a52 100644 --- a/extension/observer/k8sobserver/k8s_fixtures_test.go +++ b/extension/observer/k8sobserver/k8s_fixtures_test.go @@ -9,8 +9,8 @@ import ( "k8s.io/apimachinery/pkg/types" ) -// NewPod is a helper function for creating Pods for testing. -func NewPod(name, host string) *v1.Pod { +// newPod is a helper function for creating Pods for testing. +func newPod(name, host string) *v1.Pod { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: "default", @@ -38,7 +38,7 @@ func NewPod(name, host string) *v1.Pod { return pod } -var pod1V1 = NewPod("pod1", "localhost") +var pod1V1 = newPod("pod1", "localhost") var pod1V2 = func() *v1.Pod { pod := pod1V1.DeepCopy() pod.Labels["pod-version"] = "2" @@ -85,7 +85,7 @@ var container2StatusRunning = v1.ContainerStatus{ } var podWithNamedPorts = func() *v1.Pod { - pod := NewPod("pod-2", "localhost") + pod := newPod("pod-2", "localhost") pod.Labels = map[string]string{ "env": "prod", } @@ -104,8 +104,8 @@ func pointerBool(val bool) *bool { return &val } -// NewNode is a helper function for creating Nodes for testing. -func NewNode(name, hostname string) *v1.Node { +// newNode is a helper function for creating Nodes for testing. +func newNode(name, hostname string) *v1.Node { return &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Namespace: "namespace", @@ -147,7 +147,7 @@ func NewNode(name, hostname string) *v1.Node { } } -var node1V1 = NewNode("node1", "localhost") +var node1V1 = newNode("node1", "localhost") var node1V2 = func() *v1.Node { node := node1V1.DeepCopy() node.Labels["node-version"] = "2" diff --git a/extension/observer/k8sobserver/node_endpoint_test.go b/extension/observer/k8sobserver/node_endpoint_test.go index e547581d33c6..cba65151b03e 100644 --- a/extension/observer/k8sobserver/node_endpoint_test.go +++ b/extension/observer/k8sobserver/node_endpoint_test.go @@ -29,6 +29,6 @@ func TestNodeObjectToK8sNodeEndpoint(t *testing.T) { }, } - endpoint := convertNodeToEndpoint("namespace", NewNode("name", "hostname")) + endpoint := convertNodeToEndpoint("namespace", newNode("name", "hostname")) require.Equal(t, expectedNode, endpoint) } diff --git a/go.mod b/go.mod index 050426a1e450..9a2d22257227 100644 --- a/go.mod +++ b/go.mod @@ -264,7 +264,7 @@ require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/aliyun/aliyun-log-go-sdk v0.1.54 // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect github.com/apache/pulsar-client-go v0.8.1 // indirect github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e // indirect diff --git a/go.sum b/go.sum index 538377fcbaf7..142b3579ebd7 100644 --- a/go.sum +++ b/go.sum @@ -1020,8 +1020,8 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod 
h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= diff --git a/internal/filter/filterottl/functions.go b/internal/filter/filterottl/functions.go index ef08af2527b3..6719adfa01a2 100644 --- a/internal/filter/filterottl/functions.go +++ b/internal/filter/filterottl/functions.go @@ -70,8 +70,8 @@ func dropFn[K any]() (ottl.ExprFunc[K], error) { } type hasAttributeOnDatapointArguments struct { - Key string `ottlarg:"0"` - ExpectedVal string `ottlarg:"1"` + Key string + ExpectedVal string } func newHasAttributeOnDatapointFactory() ottl.Factory[ottlmetric.TransformContext] { @@ -95,7 +95,7 @@ func hasAttributeOnDatapoint(key string, expectedVal string) (ottl.ExprFunc[ottl } type hasAttributeKeyOnDatapointArguments struct { - Key string `ottlarg:"0"` + Key string } func newHasAttributeKeyOnDatapointFactory() ottl.Factory[ottlmetric.TransformContext] { diff --git a/internal/filter/go.mod b/internal/filter/go.mod index aedd2dd83a88..99c4c6226500 100644 --- a/internal/filter/go.mod +++ b/internal/filter/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter go 1.20 require ( - github.com/antonmedv/expr v1.15.1 + github.com/antonmedv/expr v1.15.2 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.85.0 diff --git a/internal/filter/go.sum b/internal/filter/go.sum index e0dd467b6cb1..a5566b6ac740 100644 --- a/internal/filter/go.sum +++ b/internal/filter/go.sum @@ -11,8 +11,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/pkg/ottl/contexts/internal/span.go b/pkg/ottl/contexts/internal/span.go index 
df96c007fff6..bc7b67f9e79d 100644 --- a/pkg/ottl/contexts/internal/span.go +++ b/pkg/ottl/contexts/internal/span.go @@ -82,6 +82,10 @@ func SpanPathGetSetter[K SpanContext](path []ottl.Field) (ottl.GetSetter[K], err return accessStartTimeUnixNano[K](), nil case "end_time_unix_nano": return accessEndTimeUnixNano[K](), nil + case "start_time": + return accessStartTime[K](), nil + case "end_time": + return accessEndTime[K](), nil case "attributes": mapKeys := path[0].Keys if mapKeys == nil { @@ -388,6 +392,34 @@ func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { } } +func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] { + return ottl.StandardGetSetter[K]{ + Getter: func(ctx context.Context, tCtx K) (interface{}, error) { + return tCtx.GetSpan().StartTimestamp().AsTime(), nil + }, + Setter: func(ctx context.Context, tCtx K, val interface{}) error { + if i, ok := val.(time.Time); ok { + tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(i)) + } + return nil + }, + } +} + +func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] { + return ottl.StandardGetSetter[K]{ + Getter: func(ctx context.Context, tCtx K) (interface{}, error) { + return tCtx.GetSpan().EndTimestamp().AsTime(), nil + }, + Setter: func(ctx context.Context, tCtx K, val interface{}) error { + if i, ok := val.(time.Time); ok { + tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(i)) + } + return nil + }, + } +} + func accessAttributes[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ Getter: func(ctx context.Context, tCtx K) (interface{}, error) { diff --git a/pkg/ottl/contexts/internal/span_test.go b/pkg/ottl/contexts/internal/span_test.go index d507f5d695bc..8178f5b42ecd 100644 --- a/pkg/ottl/contexts/internal/span_test.go +++ b/pkg/ottl/contexts/internal/span_test.go @@ -650,6 +650,32 @@ func TestSpanPathGetSetter(t *testing.T) { span.Status().SetMessage("bad span") }, }, + { + name: "start_time", + path: []ottl.Field{ + { + Name: "start_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span ptrace.Span) { + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, + { + name: "end_time", + path: []ottl.Field{ + { + Name: "end_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 500000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span ptrace.Span) { + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/ottl/contexts/ottldatapoint/README.md b/pkg/ottl/contexts/ottldatapoint/README.md index c50c7a69a6b1..9e32c7dbc462 100644 --- a/pkg/ottl/contexts/ottldatapoint/README.md +++ b/pkg/ottl/contexts/ottldatapoint/README.md @@ -32,6 +32,8 @@ The following paths are supported. 
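An aside, not part of the diff: a self-contained sketch of the `time.Time` round-trip the new `start_time`/`end_time` span accessors above rely on, using only the pdata calls already visible in this change. The same getter/setter pattern backs the `time` and `start_time` paths added to the datapoint, log, and span event contexts below.

```go
package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// spanTimes shows the conversion the new accessors perform: the setter stores
// a time.Time via pcommon.NewTimestampFromTime, and the getter returns the
// stored StartTimestamp/EndTimestamp back as time.Time.
func spanTimes() (time.Time, time.Time) {
	span := ptrace.NewSpan()
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 100_000_000).UTC()))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 500_000_000).UTC()))
	return span.StartTimestamp().AsTime(), span.EndTimestamp().AsTime()
}
```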
| negative.offset | the offset of the negative buckets of the data point being processed | int64 | | negative.bucket_counts | the bucket_counts of the negative buckets of the data point being processed | uint64 | | start_time_unix_nano | the start time in unix nano of the data point being processed | int64 | +| time | the time in `time.Time` of the data point being processed | `time.Time` | +| start_time | the start time in `time.Time` of the data point being processed | `time.Time` | | time_unix_nano | the time in unix nano of the data point being processed | int64 | | value_double | the double value of the data point being processed | float64 | | value_int | the int value of the data point being processed | int64 | diff --git a/pkg/ottl/contexts/ottldatapoint/datapoint.go b/pkg/ottl/contexts/ottldatapoint/datapoint.go index 87f9fc76e016..95ea4ca86066 100644 --- a/pkg/ottl/contexts/ottldatapoint/datapoint.go +++ b/pkg/ottl/contexts/ottldatapoint/datapoint.go @@ -149,6 +149,10 @@ func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], erro return accessStartTimeUnixNano(), nil case "time_unix_nano": return accessTimeUnixNano(), nil + case "start_time": + return accessStartTime(), nil + case "time": + return accessTime(), nil case "value_double": return accessDoubleValue(), nil case "value_int": @@ -323,6 +327,39 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } } +func accessStartTime() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { + switch tCtx.GetDataPoint().(type) { + case pmetric.NumberDataPoint: + return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime(), nil + case pmetric.HistogramDataPoint: + return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).StartTimestamp().AsTime(), nil + case pmetric.ExponentialHistogramDataPoint: + return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).StartTimestamp().AsTime(), nil + case pmetric.SummaryDataPoint: + return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).StartTimestamp().AsTime(), nil + } + return nil, nil + }, + Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error { + if newTime, ok := val.(time.Time); ok { + switch tCtx.GetDataPoint().(type) { + case pmetric.NumberDataPoint: + tCtx.GetDataPoint().(pmetric.NumberDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.HistogramDataPoint: + tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.ExponentialHistogramDataPoint: + tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.SummaryDataPoint: + tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(newTime)) + } + } + return nil + }, + } +} + func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { @@ -356,6 +393,39 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } } +func accessTime() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { + switch tCtx.GetDataPoint().(type) { + case pmetric.NumberDataPoint: + 
return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime(), nil + case pmetric.HistogramDataPoint: + return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Timestamp().AsTime(), nil + case pmetric.ExponentialHistogramDataPoint: + return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Timestamp().AsTime(), nil + case pmetric.SummaryDataPoint: + return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Timestamp().AsTime(), nil + } + return nil, nil + }, + Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error { + if newTime, ok := val.(time.Time); ok { + switch tCtx.GetDataPoint().(type) { + case pmetric.NumberDataPoint: + tCtx.GetDataPoint().(pmetric.NumberDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.HistogramDataPoint: + tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.ExponentialHistogramDataPoint: + tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(newTime)) + case pmetric.SummaryDataPoint: + tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(newTime)) + } + } + return nil + }, + } +} + func accessDoubleValue() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { diff --git a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go index 20bca2da04f1..8bc0b26b7202 100644 --- a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go +++ b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go @@ -120,6 +120,32 @@ func Test_newPathGetSetter_NumberDataPoint(t *testing.T) { datapoint.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) }, }, + { + name: "start_time", + path: []ottl.Field{ + { + Name: "start_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 86400000000000, time.UTC), + modified: func(datapoint pmetric.NumberDataPoint) { + datapoint.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(86400, 0))) + }, + }, + { + name: "time", + path: []ottl.Field{ + { + Name: "time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 500000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(datapoint pmetric.NumberDataPoint) { + datapoint.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, { name: "time_unix_nano", path: []ottl.Field{ @@ -518,6 +544,7 @@ func Test_newPathGetSetter_NumberDataPoint(t *testing.T) { tt.modified(exNumberDataPoint) assert.Equal(t, exNumberDataPoint, numberDataPoint) + }) } } diff --git a/pkg/ottl/contexts/ottllog/README.md b/pkg/ottl/contexts/ottllog/README.md index fc4a7fa4c148..33c462531c17 100644 --- a/pkg/ottl/contexts/ottllog/README.md +++ b/pkg/ottl/contexts/ottllog/README.md @@ -31,6 +31,8 @@ The following paths are supported. 
| span_id.string | a string representation of the span id | string | | time_unix_nano | the time in unix nano of the log being processed | int64 | | observed_time_unix_nano | the observed time in unix nano of the log being processed | int64 | +| time | the time in `time.Time` of the log being processed | `time.Time` | +| observed_time | the observed time in `time.Time` of the log being processed | `time.Time` | | severity_number | the severity numbner of the log being processed | int64 | | severity_text | the severity text of the log being processed | string | | body | the body of the log being processed | any | diff --git a/pkg/ottl/contexts/ottllog/log.go b/pkg/ottl/contexts/ottllog/log.go index 7542cf81df88..7ff37d4cbed1 100644 --- a/pkg/ottl/contexts/ottllog/log.go +++ b/pkg/ottl/contexts/ottllog/log.go @@ -148,6 +148,10 @@ func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], erro return accessTimeUnixNano(), nil case "observed_time_unix_nano": return accessObservedTimeUnixNano(), nil + case "time": + return accessTime(), nil + case "observed_time": + return accessObservedTime(), nil case "severity_number": return accessSeverityNumber(), nil case "severity_text": @@ -245,6 +249,34 @@ func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } } +func accessTime() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { + return tCtx.GetLogRecord().Timestamp().AsTime(), nil + }, + Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error { + if i, ok := val.(time.Time); ok { + tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(i)) + } + return nil + }, + } +} + +func accessObservedTime() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { + return tCtx.GetLogRecord().ObservedTimestamp().AsTime(), nil + }, + Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error { + if i, ok := val.(time.Time); ok { + tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(i)) + } + return nil + }, + } +} + func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { diff --git a/pkg/ottl/contexts/ottllog/log_test.go b/pkg/ottl/contexts/ottllog/log_test.go index a68b2ff3dc1a..555e10f3f28b 100644 --- a/pkg/ottl/contexts/ottllog/log_test.go +++ b/pkg/ottl/contexts/ottllog/log_test.go @@ -57,6 +57,19 @@ func Test_newPathGetSetter(t *testing.T) { modified func(log plog.LogRecord, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) bodyType string }{ + { + name: "time", + path: []ottl.Field{ + { + Name: "time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(log plog.LogRecord, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + log.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, { name: "time_unix_nano", path: []ottl.Field{ @@ -83,6 +96,19 @@ func Test_newPathGetSetter(t *testing.T) { log.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) }, }, + { + name: "observed time", + path: []ottl.Field{ + { + Name: 
"observed_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 500000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(log plog.LogRecord, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + log.SetObservedTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, { name: "severity_number", path: []ottl.Field{ diff --git a/pkg/ottl/contexts/ottlspan/README.md b/pkg/ottl/contexts/ottlspan/README.md index a26d18bf1fdd..6901c6c6ac88 100644 --- a/pkg/ottl/contexts/ottlspan/README.md +++ b/pkg/ottl/contexts/ottlspan/README.md @@ -42,6 +42,9 @@ The following paths are supported. | kind.deprecated_string | the kind of the span in deprecated string format. Valid values are `SPAN_KIND_UNSPECIFIED`, `SPAN_KIND_INTERNAL`, `SPAN_KIND_SERVER`, `SPAN_KIND_CLIENT`, `SPAN_KIND_PRODUCER`, and `SPAN_KIND_CONSUMER`. When setting, if an invalid value is used `SPAN_KIND_UNSPECIFIED` will be set. This accessor will eventually be removed, use `kind` or `kind.string` instead. | string | | start_time_unix_nano | the start time in unix nano of the span | int64 | | end_time_unix_nano | the end time in unix nano of the span | int64 | +| start_time | the start time in `time.Time` of the span | `time.Time` | +| end_time | the end time in `time.Time` of the span | `time.Time` | +| | dropped_attributes_count | the dropped attributes count of the span | int64 | | events | the events of the span | ptrace.SpanEventSlice | | dropped_events_count | the dropped events count of the span | int64 | diff --git a/pkg/ottl/contexts/ottlspan/span_test.go b/pkg/ottl/contexts/ottlspan/span_test.go index acd72af66cce..884d49eecc76 100644 --- a/pkg/ottl/contexts/ottlspan/span_test.go +++ b/pkg/ottl/contexts/ottlspan/span_test.go @@ -275,6 +275,32 @@ func Test_newPathGetSetter(t *testing.T) { span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) }, }, + { + name: "start_time", + path: []ottl.Field{ + { + Name: "start_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, + { + name: "end_time", + path: []ottl.Field{ + { + Name: "end_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 500000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, { name: "attributes", path: []ottl.Field{ @@ -728,6 +754,32 @@ func Test_newPathGetSetter(t *testing.T) { pcommon.NewResource().CopyTo(resource) }, }, + { + name: "start_time", + path: []ottl.Field{ + { + Name: "start_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, + { + name: "end_time", + path: []ottl.Field{ + { + Name: "end_time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 500000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(span 
ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/ottl/contexts/ottlspanevent/README.md b/pkg/ottl/contexts/ottlspanevent/README.md index 935ce9dd5915..0de4ddb18774 100644 --- a/pkg/ottl/contexts/ottlspanevent/README.md +++ b/pkg/ottl/contexts/ottlspanevent/README.md @@ -24,6 +24,7 @@ The following paths are supported. | attributes | attributes of the span event being processed | pcommon.Map | | attributes\[""\] | the value of the attribute of the span event being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | | time_unix_nano | time_unix_nano of the span event being processed | int64 | +| time | time of the span event being processed | `time.Time` | | name | name of the span event being processed | string | | dropped_attributes_count | dropped_attributes_count of the span event being processed | int64 | diff --git a/pkg/ottl/contexts/ottlspanevent/span_events.go b/pkg/ottl/contexts/ottlspanevent/span_events.go index 5c5ab5dba241..3ae20bc63ae9 100644 --- a/pkg/ottl/contexts/ottlspanevent/span_events.go +++ b/pkg/ottl/contexts/ottlspanevent/span_events.go @@ -125,6 +125,8 @@ func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], erro return internal.SpanPathGetSetter[TransformContext](path[1:]) case "time_unix_nano": return accessSpanEventTimeUnixNano(), nil + case "time": + return accessSpanEventTime(), nil case "name": return accessSpanEventName(), nil case "attributes": @@ -179,6 +181,20 @@ func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } } +func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { + return tCtx.GetSpanEvent().Timestamp().AsTime(), nil + }, + Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error { + if newTimestamp, ok := val.(time.Time); ok { + tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(newTimestamp)) + } + return nil + }, + } +} + func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) { diff --git a/pkg/ottl/contexts/ottlspanevent/span_events_test.go b/pkg/ottl/contexts/ottlspanevent/span_events_test.go index 5e25fc6a4a19..a96be125b274 100644 --- a/pkg/ottl/contexts/ottlspanevent/span_events_test.go +++ b/pkg/ottl/contexts/ottlspanevent/span_events_test.go @@ -54,6 +54,19 @@ func Test_newPathGetSetter(t *testing.T) { newVal interface{} modified func(spanEvent ptrace.SpanEvent, span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) }{ + { + name: "span event time", + path: []ottl.Field{ + { + Name: "time", + }, + }, + orig: time.Date(1970, 1, 1, 0, 0, 0, 100000000, time.UTC), + newVal: time.Date(1970, 1, 1, 0, 0, 0, 200000000, time.UTC), + modified: func(spanEvent ptrace.SpanEvent, span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource, cache pcommon.Map) { + spanEvent.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) + }, + }, { name: "cache", path: []ottl.Field{ diff --git a/pkg/ottl/expression.go 
b/pkg/ottl/expression.go index c83b5f90b560..18e5ffad56ce 100644 --- a/pkg/ottl/expression.go +++ b/pkg/ottl/expression.go @@ -280,15 +280,7 @@ func (g StandardFunctionGetter[K]) Get(args Arguments) (Expr[K], error) { } for i := 0; i < fArgsVal.NumField(); i++ { field := argsVal.Field(i) - argIndex, err := getArgumentIndex(i, argsVal) - if err != nil { - return Expr[K]{}, err - } - fArgIndex, err := getArgumentIndex(argIndex, fArgsVal) - if err != nil { - return Expr[K]{}, err - } - fArgsVal.Field(fArgIndex).Set(field) + fArgsVal.Field(i).Set(field) } fn, err := g.fact.CreateFunction(g.fCtx, fArgs) if err != nil { diff --git a/pkg/ottl/expression_test.go b/pkg/ottl/expression_test.go index 605921d78b20..2324db49d619 100644 --- a/pkg/ottl/expression_test.go +++ b/pkg/ottl/expression_test.go @@ -690,11 +690,6 @@ func Test_FunctionGetter(t *testing.T) { &multipleArgsArguments{}, functionWithStringGetter, ), - createFactory[any]( - "no_struct_tag", - &noStructTagFunctionArguments{}, - functionWithStringGetter, - ), NewFactory( "cannot_create_function", &stringGetterArguments{}, @@ -708,7 +703,7 @@ func Test_FunctionGetter(t *testing.T) { Function FunctionGetter[any] } type FuncArgs struct { - Input StringGetter[any] `ottlarg:"0"` + Input StringGetter[any] } tests := []struct { name string @@ -753,18 +748,6 @@ func Test_FunctionGetter(t *testing.T) { valid: false, expectedErrorMsg: "incorrect number of arguments. Expected: 4 Received: 1", }, - { - name: "Invalid Arguments struct tag", - getter: StandardStringGetter[interface{}]{ - Getter: func(ctx context.Context, tCtx interface{}) (interface{}, error) { - return nil, nil - }, - }, - function: StandardFunctionGetter[any]{fCtx: FunctionContext{Set: componenttest.NewNopTelemetrySettings()}, fact: functions["no_struct_tag"]}, - want: "anything", - valid: false, - expectedErrorMsg: "no `ottlarg` struct tag on Arguments field \"StringArg\"", - }, { name: "Cannot create function", getter: StandardStringGetter[interface{}]{ diff --git a/pkg/ottl/functions.go b/pkg/ottl/functions.go index 46845ac85f3c..fea6c99ba81d 100644 --- a/pkg/ottl/functions.go +++ b/pkg/ottl/functions.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "reflect" - "strconv" "strings" ) @@ -47,22 +46,6 @@ func (p *Parser[K]) newFunctionCall(ed editor) (Expr[K], error) { return Expr[K]{exprFunc: fn}, err } -func getArgumentIndex(index int, args reflect.Value) (int, error) { - argsType := args.Type() - fieldTag, ok := argsType.Field(index).Tag.Lookup("ottlarg") - if !ok { - return 0, fmt.Errorf("no `ottlarg` struct tag on Arguments field %q", argsType.Field(index).Name) - } - argNum, err := strconv.Atoi(fieldTag) - if err != nil { - return 0, fmt.Errorf("ottlarg struct tag on field %q is not a valid integer: %w", argsType.Field(index).Name, err) - } - if argNum < 0 || argNum >= args.NumField() { - return 0, fmt.Errorf("ottlarg struct tag on field %q has value %d, but must be between 0 and %d", argsType.Field(index).Name, argNum, args.NumField()) - } - return argNum, nil -} - func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error { if len(ed.Arguments) != argsVal.NumField() { return fmt.Errorf("incorrect number of arguments. 
Expected: %d Received: %d", argsVal.NumField(), len(ed.Arguments)) @@ -71,12 +54,9 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error { for i := 0; i < argsVal.NumField(); i++ { field := argsVal.Field(i) fieldType := field.Type() - argNum, err := getArgumentIndex(i, argsVal) - if err != nil { - return err - } - argVal := ed.Arguments[argNum] + argVal := ed.Arguments[i] var val any + var err error switch { case strings.HasPrefix(fieldType.Name(), "FunctionGetter"): var name string diff --git a/pkg/ottl/functions_test.go b/pkg/ottl/functions_test.go index 55ca83e432cb..011c9c7e58f4 100644 --- a/pkg/ottl/functions_test.go +++ b/pkg/ottl/functions_test.go @@ -63,31 +63,6 @@ func Test_NewFunctionCall_invalid(t *testing.T) { errorFunctionArguments{}, functionThatHasAnError, ), - createFactory( - "no_struct_tag", - &noStructTagFunctionArguments{}, - functionThatHasAnError, - ), - createFactory( - "wrong_struct_tag", - &wrongTagFunctionArguments{}, - functionThatHasAnError, - ), - createFactory( - "bad_struct_tag", - &badStructTagFunctionArguments{}, - functionThatHasAnError, - ), - createFactory( - "negative_struct_tag", - &negativeStructTagFunctionArguments{}, - functionThatHasAnError, - ), - createFactory( - "out_of_bounds_struct_tag", - &outOfBoundsStructTagFunctionArguments{}, - functionThatHasAnError, - ), createFactory( "testing_unknown_function", &functionGetterArguments{}, @@ -349,61 +324,6 @@ func Test_NewFunctionCall_invalid(t *testing.T) { Function: "non_pointer", }, }, - { - name: "no struct tags", - inv: editor{ - Function: "no_struct_tag", - Arguments: []value{ - { - String: ottltest.Strp("str"), - }, - }, - }, - }, - { - name: "using the wrong struct tag", - inv: editor{ - Function: "wrong_struct_tag", - Arguments: []value{ - { - String: ottltest.Strp("str"), - }, - }, - }, - }, - { - name: "non-integer struct tags", - inv: editor{ - Function: "bad_struct_tag", - Arguments: []value{ - { - String: ottltest.Strp("str"), - }, - }, - }, - }, - { - name: "struct tag index too low", - inv: editor{ - Function: "negative_struct_tag", - Arguments: []value{ - { - String: ottltest.Strp("str"), - }, - }, - }, - }, - { - name: "struct tag index too high", - inv: editor{ - Function: "out_of_bounds_struct_tag", - Arguments: []value{ - { - String: ottltest.Strp("str"), - }, - }, - }, - }, } for _, tt := range tests { @@ -1271,7 +1191,7 @@ func functionWithErr() (ExprFunc[any], error) { } type stringSliceArguments struct { - Strings []string `ottlarg:"0"` + Strings []string } func functionWithStringSlice(strs []string) (ExprFunc[any], error) { @@ -1281,7 +1201,7 @@ func functionWithStringSlice(strs []string) (ExprFunc[any], error) { } type floatSliceArguments struct { - Floats []float64 `ottlarg:"0"` + Floats []float64 } func functionWithFloatSlice(floats []float64) (ExprFunc[interface{}], error) { @@ -1291,7 +1211,7 @@ func functionWithFloatSlice(floats []float64) (ExprFunc[interface{}], error) { } type intSliceArguments struct { - Ints []int64 `ottlarg:"0"` + Ints []int64 } func functionWithIntSlice(ints []int64) (ExprFunc[interface{}], error) { @@ -1301,7 +1221,7 @@ func functionWithIntSlice(ints []int64) (ExprFunc[interface{}], error) { } type byteSliceArguments struct { - Bytes []byte `ottlarg:"0"` + Bytes []byte } func functionWithByteSlice(bytes []byte) (ExprFunc[interface{}], error) { @@ -1311,7 +1231,7 @@ func functionWithByteSlice(bytes []byte) (ExprFunc[interface{}], error) { } type getterSliceArguments struct { - Getters []Getter[any] `ottlarg:"0"` + Getters 
[]Getter[any] } func functionWithGetterSlice(getters []Getter[interface{}]) (ExprFunc[interface{}], error) { @@ -1321,7 +1241,7 @@ func functionWithGetterSlice(getters []Getter[interface{}]) (ExprFunc[interface{ } type stringGetterSliceArguments struct { - StringGetters []StringGetter[any] `ottlarg:"0"` + StringGetters []StringGetter[any] } func functionWithStringGetterSlice(getters []StringGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1331,7 +1251,7 @@ func functionWithStringGetterSlice(getters []StringGetter[interface{}]) (ExprFun } type durationGetterSliceArguments struct { - DurationGetters []DurationGetter[any] `ottlarg:"0"` + DurationGetters []DurationGetter[any] } func functionWithDurationGetterSlice(_ []DurationGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1341,7 +1261,7 @@ func functionWithDurationGetterSlice(_ []DurationGetter[interface{}]) (ExprFunc[ } type timeGetterSliceArguments struct { - TimeGetters []TimeGetter[any] `ottlarg:"0"` + TimeGetters []TimeGetter[any] } func functionWithTimeGetterSlice(_ []TimeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1351,7 +1271,7 @@ func functionWithTimeGetterSlice(_ []TimeGetter[interface{}]) (ExprFunc[interfac } type floatGetterSliceArguments struct { - FloatGetters []FloatGetter[any] `ottlarg:"0"` + FloatGetters []FloatGetter[any] } func functionWithFloatGetterSlice(getters []FloatGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1361,7 +1281,7 @@ func functionWithFloatGetterSlice(getters []FloatGetter[interface{}]) (ExprFunc[ } type intGetterSliceArguments struct { - IntGetters []IntGetter[any] `ottlarg:"0"` + IntGetters []IntGetter[any] } func functionWithIntGetterSlice(getters []IntGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1371,7 +1291,7 @@ func functionWithIntGetterSlice(getters []IntGetter[interface{}]) (ExprFunc[inte } type pMapGetterSliceArguments struct { - PMapGetters []PMapGetter[any] `ottlarg:"0"` + PMapGetters []PMapGetter[any] } func functionWithPMapGetterSlice(getters []PMapGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1381,7 +1301,7 @@ func functionWithPMapGetterSlice(getters []PMapGetter[interface{}]) (ExprFunc[in } type stringLikeGetterSliceArguments struct { - StringLikeGetters []StringLikeGetter[any] `ottlarg:"0"` + StringLikeGetters []StringLikeGetter[any] } func functionWithStringLikeGetterSlice(getters []StringLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1391,7 +1311,7 @@ func functionWithStringLikeGetterSlice(getters []StringLikeGetter[interface{}]) } type floatLikeGetterSliceArguments struct { - FloatLikeGetters []FloatLikeGetter[any] `ottlarg:"0"` + FloatLikeGetters []FloatLikeGetter[any] } func functionWithFloatLikeGetterSlice(getters []FloatLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1401,7 +1321,7 @@ func functionWithFloatLikeGetterSlice(getters []FloatLikeGetter[interface{}]) (E } type intLikeGetterSliceArguments struct { - IntLikeGetters []IntLikeGetter[any] `ottlarg:"0"` + IntLikeGetters []IntLikeGetter[any] } func functionWithIntLikeGetterSlice(getters []IntLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1411,7 +1331,7 @@ func functionWithIntLikeGetterSlice(getters []IntLikeGetter[interface{}]) (ExprF } type setterArguments struct { - SetterArg Setter[any] `ottlarg:"0"` + SetterArg Setter[any] } func functionWithSetter(Setter[interface{}]) (ExprFunc[interface{}], error) { @@ -1421,7 +1341,7 @@ func functionWithSetter(Setter[interface{}]) (ExprFunc[interface{}], error) { } type 
getSetterArguments struct { - GetSetterArg GetSetter[any] `ottlarg:"0"` + GetSetterArg GetSetter[any] } func functionWithGetSetter(GetSetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1431,7 +1351,7 @@ func functionWithGetSetter(GetSetter[interface{}]) (ExprFunc[interface{}], error } type getterArguments struct { - GetterArg Getter[any] `ottlarg:"0"` + GetterArg Getter[any] } func functionWithGetter(Getter[interface{}]) (ExprFunc[interface{}], error) { @@ -1441,7 +1361,7 @@ func functionWithGetter(Getter[interface{}]) (ExprFunc[interface{}], error) { } type stringGetterArguments struct { - StringGetterArg StringGetter[any] `ottlarg:"0"` + StringGetterArg StringGetter[any] } func functionWithStringGetter(StringGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1451,7 +1371,7 @@ func functionWithStringGetter(StringGetter[interface{}]) (ExprFunc[interface{}], } type durationGetterArguments struct { - DurationGetterArg DurationGetter[any] `ottlarg:"0"` + DurationGetterArg DurationGetter[any] } func functionWithDurationGetter(DurationGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1461,7 +1381,7 @@ func functionWithDurationGetter(DurationGetter[interface{}]) (ExprFunc[interface } type timeGetterArguments struct { - TimeGetterArg TimeGetter[any] `ottlarg:"0"` + TimeGetterArg TimeGetter[any] } func functionWithTimeGetter(TimeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1471,7 +1391,7 @@ func functionWithTimeGetter(TimeGetter[interface{}]) (ExprFunc[interface{}], err } type functionGetterArguments struct { - FunctionGetterArg FunctionGetter[any] `ottlarg:"0"` + FunctionGetterArg FunctionGetter[any] } func functionWithFunctionGetter(FunctionGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1481,7 +1401,7 @@ func functionWithFunctionGetter(FunctionGetter[interface{}]) (ExprFunc[interface } type stringLikeGetterArguments struct { - StringLikeGetterArg StringLikeGetter[any] `ottlarg:"0"` + StringLikeGetterArg StringLikeGetter[any] } func functionWithStringLikeGetter(StringLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1491,7 +1411,7 @@ func functionWithStringLikeGetter(StringLikeGetter[interface{}]) (ExprFunc[inter } type floatGetterArguments struct { - FloatGetterArg FloatGetter[any] `ottlarg:"0"` + FloatGetterArg FloatGetter[any] } func functionWithFloatGetter(FloatGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1501,7 +1421,7 @@ func functionWithFloatGetter(FloatGetter[interface{}]) (ExprFunc[interface{}], e } type floatLikeGetterArguments struct { - FloatLikeGetterArg FloatLikeGetter[any] `ottlarg:"0"` + FloatLikeGetterArg FloatLikeGetter[any] } func functionWithFloatLikeGetter(FloatLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1511,7 +1431,7 @@ func functionWithFloatLikeGetter(FloatLikeGetter[interface{}]) (ExprFunc[interfa } type intGetterArguments struct { - IntGetterArg IntGetter[any] `ottlarg:"0"` + IntGetterArg IntGetter[any] } func functionWithIntGetter(IntGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1521,7 +1441,7 @@ func functionWithIntGetter(IntGetter[interface{}]) (ExprFunc[interface{}], error } type intLikeGetterArguments struct { - IntLikeGetterArg IntLikeGetter[any] `ottlarg:"0"` + IntLikeGetterArg IntLikeGetter[any] } func functionWithIntLikeGetter(IntLikeGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1531,7 +1451,7 @@ func functionWithIntLikeGetter(IntLikeGetter[interface{}]) (ExprFunc[interface{} } type pMapGetterArguments struct { - PMapArg PMapGetter[any] `ottlarg:"0"` 
+ PMapArg PMapGetter[any] } func functionWithPMapGetter(PMapGetter[interface{}]) (ExprFunc[interface{}], error) { @@ -1541,7 +1461,7 @@ func functionWithPMapGetter(PMapGetter[interface{}]) (ExprFunc[interface{}], err } type stringArguments struct { - StringArg string `ottlarg:"0"` + StringArg string } func functionWithString(string) (ExprFunc[interface{}], error) { @@ -1551,7 +1471,7 @@ func functionWithString(string) (ExprFunc[interface{}], error) { } type floatArguments struct { - FloatArg float64 `ottlarg:"0"` + FloatArg float64 } func functionWithFloat(float64) (ExprFunc[interface{}], error) { @@ -1561,7 +1481,7 @@ func functionWithFloat(float64) (ExprFunc[interface{}], error) { } type intArguments struct { - IntArg int64 `ottlarg:"0"` + IntArg int64 } func functionWithInt(int64) (ExprFunc[interface{}], error) { @@ -1571,7 +1491,7 @@ func functionWithInt(int64) (ExprFunc[interface{}], error) { } type boolArguments struct { - BoolArg bool `ottlarg:"0"` + BoolArg bool } func functionWithBool(bool) (ExprFunc[interface{}], error) { @@ -1581,10 +1501,10 @@ func functionWithBool(bool) (ExprFunc[interface{}], error) { } type multipleArgsArguments struct { - GetSetterArg GetSetter[any] `ottlarg:"0"` - StringArg string `ottlarg:"1"` - FloatArg float64 `ottlarg:"2"` - IntArg int64 `ottlarg:"3"` + GetSetterArg GetSetter[any] + StringArg string + FloatArg float64 + IntArg int64 } func functionWithMultipleArgs(GetSetter[interface{}], string, float64, int64) (ExprFunc[interface{}], error) { @@ -1603,7 +1523,7 @@ func functionThatHasAnError() (ExprFunc[interface{}], error) { } type enumArguments struct { - EnumArg Enum `ottlarg:"0"` + EnumArg Enum } func functionWithEnum(Enum) (ExprFunc[interface{}], error) { @@ -1612,26 +1532,6 @@ func functionWithEnum(Enum) (ExprFunc[interface{}], error) { }, nil } -type noStructTagFunctionArguments struct { - StringArg string -} - -type badStructTagFunctionArguments struct { - StringArg string `ottlarg:"a"` -} - -type negativeStructTagFunctionArguments struct { - StringArg string `ottlarg:"-1"` -} - -type outOfBoundsStructTagFunctionArguments struct { - StringArg string `ottlarg:"1"` -} - -type wrongTagFunctionArguments struct { - StringArg string `argument:"1"` -} - func createFactory[A any](name string, args A, fn any) Factory[any] { createFunction := func(fCtx FunctionContext, oArgs Arguments) (ExprFunc[any], error) { fArgs, ok := oArgs.(A) diff --git a/pkg/ottl/math_test.go b/pkg/ottl/math_test.go index bb279a7bf29b..30e58c273e8d 100644 --- a/pkg/ottl/math_test.go +++ b/pkg/ottl/math_test.go @@ -83,7 +83,7 @@ func testDuration[K any](duration string) (ExprFunc[K], error) { } type sumArguments struct { - Ints []int64 `ottlarg:"0"` + Ints []int64 } //nolint:unparam @@ -514,11 +514,11 @@ func Test_evaluateMathExpression_error(t *testing.T) { createFactory("threePointOne", &struct{}{}, threePointOne[any]), createFactory("sum", &sumArguments{}, sum[any]), createFactory("Time", &struct { - Time string `ottlarg:"0"` - Format string `ottlarg:"1"` + Time string + Format string }{}, testTime[any]), createFactory("Duration", &struct { - Duration string `ottlarg:"0"` + Duration string }{}, testDuration[any]), ) @@ -564,11 +564,11 @@ func Test_evaluateMathExpression_error(t *testing.T) { func Test_evaluateMathExpressionTimeDuration(t *testing.T) { functions := CreateFactoryMap( createFactory("Time", &struct { - Time string `ottlarg:"0"` - Format string `ottlarg:"1"` + Time string + Format string }{}, testTime[any]), createFactory("Duration", &struct { - Duration string 
`ottlarg:"0"` + Duration string }{}, testDuration[any]), ) diff --git a/pkg/ottl/ottlfuncs/func_concat.go b/pkg/ottl/ottlfuncs/func_concat.go index cdf1a11acbdf..08a0db0d863c 100644 --- a/pkg/ottl/ottlfuncs/func_concat.go +++ b/pkg/ottl/ottlfuncs/func_concat.go @@ -12,8 +12,8 @@ import ( ) type ConcatArguments[K any] struct { - Vals []ottl.StringLikeGetter[K] `ottlarg:"0"` - Delimiter string `ottlarg:"1"` + Vals []ottl.StringLikeGetter[K] + Delimiter string } func NewConcatFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_convert_case.go b/pkg/ottl/ottlfuncs/func_convert_case.go index 4fd71ab9ffb4..79970fd3322f 100644 --- a/pkg/ottl/ottlfuncs/func_convert_case.go +++ b/pkg/ottl/ottlfuncs/func_convert_case.go @@ -14,8 +14,8 @@ import ( ) type ConvertCaseArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` - ToCase string `ottlarg:"1"` + Target ottl.StringGetter[K] + ToCase string } func NewConvertCaseFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_delete_key.go b/pkg/ottl/ottlfuncs/func_delete_key.go index 3bbe0879f16a..443f3d8df5b1 100644 --- a/pkg/ottl/ottlfuncs/func_delete_key.go +++ b/pkg/ottl/ottlfuncs/func_delete_key.go @@ -11,8 +11,8 @@ import ( ) type DeleteKeyArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Key string `ottlarg:"1"` + Target ottl.PMapGetter[K] + Key string } func NewDeleteKeyFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_delete_matching_keys.go b/pkg/ottl/ottlfuncs/func_delete_matching_keys.go index ab0e5078776d..18144d046889 100644 --- a/pkg/ottl/ottlfuncs/func_delete_matching_keys.go +++ b/pkg/ottl/ottlfuncs/func_delete_matching_keys.go @@ -14,8 +14,8 @@ import ( ) type DeleteMatchingKeysArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Pattern string `ottlarg:"1"` + Target ottl.PMapGetter[K] + Pattern string } func NewDeleteMatchingKeysFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_duration.go b/pkg/ottl/ottlfuncs/func_duration.go index 85d0c6f27eda..395d1fed05be 100644 --- a/pkg/ottl/ottlfuncs/func_duration.go +++ b/pkg/ottl/ottlfuncs/func_duration.go @@ -12,7 +12,7 @@ import ( ) type DurationArguments[K any] struct { - Duration ottl.StringGetter[K] `ottlarg:"0"` + Duration ottl.StringGetter[K] } func NewDurationFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_extract_patterns.go b/pkg/ottl/ottlfuncs/func_extract_patterns.go index 4101e8d9182f..8eef411ebdfb 100644 --- a/pkg/ottl/ottlfuncs/func_extract_patterns.go +++ b/pkg/ottl/ottlfuncs/func_extract_patterns.go @@ -14,8 +14,8 @@ import ( ) type ExtractPatternsArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` - Pattern string `ottlarg:"1"` + Target ottl.StringGetter[K] + Pattern string } func NewExtractPatternsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_fnv.go b/pkg/ottl/ottlfuncs/func_fnv.go index 026a4d1f1b02..8da56e599748 100644 --- a/pkg/ottl/ottlfuncs/func_fnv.go +++ b/pkg/ottl/ottlfuncs/func_fnv.go @@ -12,7 +12,7 @@ import ( ) type FnvArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` + Target ottl.StringGetter[K] } func NewFnvFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_hours.go b/pkg/ottl/ottlfuncs/func_hours.go index bec1cf466d96..c4b465503996 100644 --- a/pkg/ottl/ottlfuncs/func_hours.go +++ b/pkg/ottl/ottlfuncs/func_hours.go @@ -11,7 +11,7 @@ import ( ) type HoursArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` 
+ Duration ottl.DurationGetter[K] } func NewHoursFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_int.go b/pkg/ottl/ottlfuncs/func_int.go index 2de3c39fe18c..94c7c1356394 100644 --- a/pkg/ottl/ottlfuncs/func_int.go +++ b/pkg/ottl/ottlfuncs/func_int.go @@ -11,7 +11,7 @@ import ( ) type IntArguments[K any] struct { - Target ottl.IntLikeGetter[K] `ottlarg:"0"` + Target ottl.IntLikeGetter[K] } func NewIntFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_is_map.go b/pkg/ottl/ottlfuncs/func_is_map.go index afb470b9bf3f..25dfb8383994 100644 --- a/pkg/ottl/ottlfuncs/func_is_map.go +++ b/pkg/ottl/ottlfuncs/func_is_map.go @@ -11,7 +11,7 @@ import ( ) type IsMapArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` + Target ottl.PMapGetter[K] } func NewIsMapFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_is_match.go b/pkg/ottl/ottlfuncs/func_is_match.go index a4e48b9de345..b9f09c11cf20 100644 --- a/pkg/ottl/ottlfuncs/func_is_match.go +++ b/pkg/ottl/ottlfuncs/func_is_match.go @@ -12,8 +12,8 @@ import ( ) type IsMatchArguments[K any] struct { - Target ottl.StringLikeGetter[K] `ottlarg:"0"` - Pattern string `ottlarg:"1"` + Target ottl.StringLikeGetter[K] + Pattern string } func NewIsMatchFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_is_string.go b/pkg/ottl/ottlfuncs/func_is_string.go index 62560ab8244b..29d2a1843c93 100644 --- a/pkg/ottl/ottlfuncs/func_is_string.go +++ b/pkg/ottl/ottlfuncs/func_is_string.go @@ -11,7 +11,7 @@ import ( ) type IsStringArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` + Target ottl.StringGetter[K] } func NewIsStringFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_keep_keys.go b/pkg/ottl/ottlfuncs/func_keep_keys.go index 035bade156c3..59b58ead88ea 100644 --- a/pkg/ottl/ottlfuncs/func_keep_keys.go +++ b/pkg/ottl/ottlfuncs/func_keep_keys.go @@ -13,8 +13,8 @@ import ( ) type KeepKeysArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Keys []string `ottlarg:"1"` + Target ottl.PMapGetter[K] + Keys []string } func NewKeepKeysFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_len.go b/pkg/ottl/ottlfuncs/func_len.go index a1d9e50940a2..9a566b05562c 100644 --- a/pkg/ottl/ottlfuncs/func_len.go +++ b/pkg/ottl/ottlfuncs/func_len.go @@ -21,7 +21,7 @@ const ( ) type LenArguments[K any] struct { - Target ottl.Getter[K] `ottlarg:"0"` + Target ottl.Getter[K] } func NewLenFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_limit.go b/pkg/ottl/ottlfuncs/func_limit.go index 51a645df8aa3..1fec372fa174 100644 --- a/pkg/ottl/ottlfuncs/func_limit.go +++ b/pkg/ottl/ottlfuncs/func_limit.go @@ -13,9 +13,9 @@ import ( ) type LimitArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Limit int64 `ottlarg:"1"` - PriorityKeys []string `ottlarg:"2"` + Target ottl.PMapGetter[K] + Limit int64 + PriorityKeys []string } func NewLimitFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_log.go b/pkg/ottl/ottlfuncs/func_log.go index 1f24c197f560..8e6111ad8fb8 100644 --- a/pkg/ottl/ottlfuncs/func_log.go +++ b/pkg/ottl/ottlfuncs/func_log.go @@ -12,7 +12,7 @@ import ( ) type LogArguments[K any] struct { - Target ottl.FloatLikeGetter[K] `ottlarg:"0"` + Target ottl.FloatLikeGetter[K] } func NewLogFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_merge_maps.go b/pkg/ottl/ottlfuncs/func_merge_maps.go index e9d24a160f65..5560def71010 100644 --- 
a/pkg/ottl/ottlfuncs/func_merge_maps.go +++ b/pkg/ottl/ottlfuncs/func_merge_maps.go @@ -19,9 +19,9 @@ const ( ) type MergeMapsArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Source ottl.PMapGetter[K] `ottlarg:"1"` - Strategy string `ottlarg:"2"` + Target ottl.PMapGetter[K] + Source ottl.PMapGetter[K] + Strategy string } func NewMergeMapsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_microseconds.go b/pkg/ottl/ottlfuncs/func_microseconds.go index ec089a7c8184..9c0bdc94c096 100644 --- a/pkg/ottl/ottlfuncs/func_microseconds.go +++ b/pkg/ottl/ottlfuncs/func_microseconds.go @@ -11,7 +11,7 @@ import ( ) type MicrosecondsArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` + Duration ottl.DurationGetter[K] } func NewMicrosecondsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_milliseconds.go b/pkg/ottl/ottlfuncs/func_milliseconds.go index dee4d27c4730..cd59ffb54c7b 100644 --- a/pkg/ottl/ottlfuncs/func_milliseconds.go +++ b/pkg/ottl/ottlfuncs/func_milliseconds.go @@ -11,7 +11,7 @@ import ( ) type MillisecondsArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` + Duration ottl.DurationGetter[K] } func NewMillisecondsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_minutes.go b/pkg/ottl/ottlfuncs/func_minutes.go index d62cd4ecf2f2..366922d4b722 100644 --- a/pkg/ottl/ottlfuncs/func_minutes.go +++ b/pkg/ottl/ottlfuncs/func_minutes.go @@ -11,7 +11,7 @@ import ( ) type MinutesArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` + Duration ottl.DurationGetter[K] } func NewMinutesFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_nanoseconds.go b/pkg/ottl/ottlfuncs/func_nanoseconds.go index 9d31fb846ec6..95700900501c 100644 --- a/pkg/ottl/ottlfuncs/func_nanoseconds.go +++ b/pkg/ottl/ottlfuncs/func_nanoseconds.go @@ -11,7 +11,7 @@ import ( ) type NanosecondsArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` + Duration ottl.DurationGetter[K] } func NewNanosecondsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_parse_json.go b/pkg/ottl/ottlfuncs/func_parse_json.go index 4355dd62c7c7..7ca06f83034b 100644 --- a/pkg/ottl/ottlfuncs/func_parse_json.go +++ b/pkg/ottl/ottlfuncs/func_parse_json.go @@ -14,7 +14,7 @@ import ( ) type ParseJSONArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` + Target ottl.StringGetter[K] } func NewParseJSONFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_replace_all_matches.go b/pkg/ottl/ottlfuncs/func_replace_all_matches.go index 457a4582477c..4a686be2e958 100644 --- a/pkg/ottl/ottlfuncs/func_replace_all_matches.go +++ b/pkg/ottl/ottlfuncs/func_replace_all_matches.go @@ -14,9 +14,9 @@ import ( ) type ReplaceAllMatchesArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Pattern string `ottlarg:"1"` - Replacement ottl.StringGetter[K] `ottlarg:"2"` + Target ottl.PMapGetter[K] + Pattern string + Replacement ottl.StringGetter[K] } func NewReplaceAllMatchesFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_replace_all_patterns.go b/pkg/ottl/ottlfuncs/func_replace_all_patterns.go index f4d9044c93a5..c583f00a34ba 100644 --- a/pkg/ottl/ottlfuncs/func_replace_all_patterns.go +++ b/pkg/ottl/ottlfuncs/func_replace_all_patterns.go @@ -19,10 +19,10 @@ const ( ) type ReplaceAllPatternsArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Mode string `ottlarg:"1"` - RegexPattern string 
`ottlarg:"2"` - Replacement ottl.StringGetter[K] `ottlarg:"3"` + Target ottl.PMapGetter[K] + Mode string + RegexPattern string + Replacement ottl.StringGetter[K] } func NewReplaceAllPatternsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_replace_match.go b/pkg/ottl/ottlfuncs/func_replace_match.go index 3e15bb87c849..578e84becfff 100644 --- a/pkg/ottl/ottlfuncs/func_replace_match.go +++ b/pkg/ottl/ottlfuncs/func_replace_match.go @@ -13,9 +13,9 @@ import ( ) type ReplaceMatchArguments[K any] struct { - Target ottl.GetSetter[K] `ottlarg:"0"` - Pattern string `ottlarg:"1"` - Replacement ottl.StringGetter[K] `ottlarg:"2"` + Target ottl.GetSetter[K] + Pattern string + Replacement ottl.StringGetter[K] } func NewReplaceMatchFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_replace_pattern.go b/pkg/ottl/ottlfuncs/func_replace_pattern.go index cd5139d1c514..1386fec80c01 100644 --- a/pkg/ottl/ottlfuncs/func_replace_pattern.go +++ b/pkg/ottl/ottlfuncs/func_replace_pattern.go @@ -12,9 +12,9 @@ import ( ) type ReplacePatternArguments[K any] struct { - Target ottl.GetSetter[K] `ottlarg:"0"` - RegexPattern string `ottlarg:"1"` - Replacement ottl.StringGetter[K] `ottlarg:"2"` + Target ottl.GetSetter[K] + RegexPattern string + Replacement ottl.StringGetter[K] } func NewReplacePatternFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_seconds.go b/pkg/ottl/ottlfuncs/func_seconds.go index 403d68d99bb5..da570ce1d5fe 100644 --- a/pkg/ottl/ottlfuncs/func_seconds.go +++ b/pkg/ottl/ottlfuncs/func_seconds.go @@ -11,7 +11,7 @@ import ( ) type SecondsArguments[K any] struct { - Duration ottl.DurationGetter[K] `ottlarg:"0"` + Duration ottl.DurationGetter[K] } func NewSecondsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_set.go b/pkg/ottl/ottlfuncs/func_set.go index 104fd9fdd4a6..6a8f0da3f0fb 100644 --- a/pkg/ottl/ottlfuncs/func_set.go +++ b/pkg/ottl/ottlfuncs/func_set.go @@ -11,8 +11,8 @@ import ( ) type SetArguments[K any] struct { - Target ottl.Setter[K] `ottlarg:"0"` - Value ottl.Getter[K] `ottlarg:"1"` + Target ottl.Setter[K] + Value ottl.Getter[K] } func NewSetFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_sha1.go b/pkg/ottl/ottlfuncs/func_sha1.go index f8eb721b935a..e091d2110fb6 100644 --- a/pkg/ottl/ottlfuncs/func_sha1.go +++ b/pkg/ottl/ottlfuncs/func_sha1.go @@ -13,7 +13,7 @@ import ( ) type SHA1Arguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` + Target ottl.StringGetter[K] } func NewSHA1Factory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_sha256.go b/pkg/ottl/ottlfuncs/func_sha256.go index fc8a9259b311..179d8bbe7b2f 100644 --- a/pkg/ottl/ottlfuncs/func_sha256.go +++ b/pkg/ottl/ottlfuncs/func_sha256.go @@ -13,7 +13,7 @@ import ( ) type SHA256Arguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` + Target ottl.StringGetter[K] } func NewSHA256Factory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_span_id.go b/pkg/ottl/ottlfuncs/func_span_id.go index 123b193b1923..74f9ee910ad5 100644 --- a/pkg/ottl/ottlfuncs/func_span_id.go +++ b/pkg/ottl/ottlfuncs/func_span_id.go @@ -14,7 +14,7 @@ import ( ) type SpanIDArguments[K any] struct { - Bytes []byte `ottlarg:"0"` + Bytes []byte } func NewSpanIDFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_split.go b/pkg/ottl/ottlfuncs/func_split.go index b80c951df16e..24e45fcb5c09 100644 --- a/pkg/ottl/ottlfuncs/func_split.go +++ b/pkg/ottl/ottlfuncs/func_split.go @@ -12,8 
+12,8 @@ import ( ) type SplitArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` - Delimiter string `ottlarg:"1"` + Target ottl.StringGetter[K] + Delimiter string } func NewSplitFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_substring.go b/pkg/ottl/ottlfuncs/func_substring.go index b1658a8dc608..974a110b8716 100644 --- a/pkg/ottl/ottlfuncs/func_substring.go +++ b/pkg/ottl/ottlfuncs/func_substring.go @@ -11,9 +11,9 @@ import ( ) type SubstringArguments[K any] struct { - Target ottl.StringGetter[K] `ottlarg:"0"` - Start ottl.IntGetter[K] `ottlarg:"1"` - Length ottl.IntGetter[K] `ottlarg:"2"` + Target ottl.StringGetter[K] + Start ottl.IntGetter[K] + Length ottl.IntGetter[K] } func NewSubstringFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_time.go b/pkg/ottl/ottlfuncs/func_time.go index 93202675772d..427069eb2cdb 100644 --- a/pkg/ottl/ottlfuncs/func_time.go +++ b/pkg/ottl/ottlfuncs/func_time.go @@ -12,8 +12,8 @@ import ( ) type TimeArguments[K any] struct { - Time ottl.StringGetter[K] `ottlarg:"0"` - Format string `ottlarg:"1"` + Time ottl.StringGetter[K] + Format string } func NewTimeFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_trace_id.go b/pkg/ottl/ottlfuncs/func_trace_id.go index b01630360338..c28013b75ab2 100644 --- a/pkg/ottl/ottlfuncs/func_trace_id.go +++ b/pkg/ottl/ottlfuncs/func_trace_id.go @@ -14,7 +14,7 @@ import ( ) type TraceIDArguments[K any] struct { - Bytes []byte `ottlarg:"0"` + Bytes []byte } func NewTraceIDFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_truncate_all.go b/pkg/ottl/ottlfuncs/func_truncate_all.go index 5196476a8b3f..5d1a7fc08365 100644 --- a/pkg/ottl/ottlfuncs/func_truncate_all.go +++ b/pkg/ottl/ottlfuncs/func_truncate_all.go @@ -13,8 +13,8 @@ import ( ) type TruncateAllArguments[K any] struct { - Target ottl.PMapGetter[K] `ottlarg:"0"` - Limit int64 `ottlarg:"1"` + Target ottl.PMapGetter[K] + Limit int64 } func NewTruncateAllFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_unix_micro.go b/pkg/ottl/ottlfuncs/func_unix_micro.go index 623903660c3f..94a5e010dd4f 100644 --- a/pkg/ottl/ottlfuncs/func_unix_micro.go +++ b/pkg/ottl/ottlfuncs/func_unix_micro.go @@ -11,7 +11,7 @@ import ( ) type UnixMicroArguments[K any] struct { - Time ottl.TimeGetter[K] `ottlarg:"0"` + Time ottl.TimeGetter[K] } func NewUnixMicroFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_unix_milli.go b/pkg/ottl/ottlfuncs/func_unix_milli.go index 94a60e7388c2..dac3f7d65237 100644 --- a/pkg/ottl/ottlfuncs/func_unix_milli.go +++ b/pkg/ottl/ottlfuncs/func_unix_milli.go @@ -11,7 +11,7 @@ import ( ) type UnixMilliArguments[K any] struct { - Time ottl.TimeGetter[K] `ottlarg:"0"` + Time ottl.TimeGetter[K] } func NewUnixMilliFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_unix_nano.go b/pkg/ottl/ottlfuncs/func_unix_nano.go index b8e396479b6c..8935aea5325e 100644 --- a/pkg/ottl/ottlfuncs/func_unix_nano.go +++ b/pkg/ottl/ottlfuncs/func_unix_nano.go @@ -11,7 +11,7 @@ import ( ) type UnixNanoArguments[K any] struct { - Time ottl.TimeGetter[K] `ottlarg:"0"` + Time ottl.TimeGetter[K] } func NewUnixNanoFactory[K any]() ottl.Factory[K] { diff --git a/pkg/ottl/ottlfuncs/func_unix_seconds.go b/pkg/ottl/ottlfuncs/func_unix_seconds.go index 9d8e8ebd254b..c79c6b9f7bcc 100644 --- a/pkg/ottl/ottlfuncs/func_unix_seconds.go +++ b/pkg/ottl/ottlfuncs/func_unix_seconds.go @@ -11,7 +11,7 @@ import ( ) type UnixSecondsArguments[K any] 
struct { - Time ottl.TimeGetter[K] `ottlarg:"0"` + Time ottl.TimeGetter[K] } func NewUnixSecondsFactory[K any]() ottl.Factory[K] { diff --git a/pkg/stanza/fileconsumer/config.go b/pkg/stanza/fileconsumer/config.go index c0158b4a5d2b..d5416d8da88a 100644 --- a/pkg/stanza/fileconsumer/config.go +++ b/pkg/stanza/fileconsumer/config.go @@ -119,7 +119,7 @@ func (c Config) BuildWithSplitFunc(logger *zap.SugaredLogger, emit emit.Callback } // Ensure that splitter is buildable - factory := splitter.NewCustomFactory(splitFunc, c.FlushPeriod) + factory := splitter.NewCustomFactory(splitFunc, c.TrimConfig.Func(), c.FlushPeriod) if _, err := factory.SplitFunc(); err != nil { return nil, err } diff --git a/pkg/stanza/fileconsumer/file.go b/pkg/stanza/fileconsumer/file.go index d610184ca1b9..5f28bf15d0e2 100644 --- a/pkg/stanza/fileconsumer/file.go +++ b/pkg/stanza/fileconsumer/file.go @@ -115,7 +115,7 @@ func (m *Manager) poll(ctx context.Context) { // Get the list of paths on disk matches, err := m.fileMatcher.MatchFiles() if err != nil { - m.Warnf("finding files: %v", err) + m.Infof("finding files: %v", err) } for len(matches) > m.maxBatchFiles { diff --git a/pkg/stanza/fileconsumer/internal/header/config.go b/pkg/stanza/fileconsumer/internal/header/config.go index c28ba2ac103a..618d1b1fa714 100644 --- a/pkg/stanza/fileconsumer/internal/header/config.go +++ b/pkg/stanza/fileconsumer/internal/header/config.go @@ -16,6 +16,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/split" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" ) type Config struct { @@ -69,13 +70,16 @@ func NewConfig(matchRegex string, metadataOperators []operator.Config, enc encod return nil, fmt.Errorf("failed to compile `pattern`: %w", err) } - splitFunc, err := split.NewlineSplitFunc(enc, false, func(b []byte) []byte { - return bytes.Trim(b, "\r\n") - }) + splitFunc, err := split.NewlineSplitFunc(enc, false) if err != nil { return nil, fmt.Errorf("failed to create split func: %w", err) } + var trimFunc trim.Func = func(b []byte) []byte { + return bytes.Trim(b, "\r\n") + } + splitFunc = trim.WithFunc(splitFunc, trimFunc) + return &Config{ regex: regex, SplitFunc: splitFunc, diff --git a/pkg/stanza/fileconsumer/internal/splitter/custom.go b/pkg/stanza/fileconsumer/internal/splitter/custom.go index 52fe9125e627..6cb8afced95f 100644 --- a/pkg/stanza/fileconsumer/internal/splitter/custom.go +++ b/pkg/stanza/fileconsumer/internal/splitter/custom.go @@ -13,19 +13,21 @@ import ( type customFactory struct { splitFunc bufio.SplitFunc + trimFunc trim.Func flushPeriod time.Duration } var _ Factory = (*customFactory)(nil) -func NewCustomFactory(splitFunc bufio.SplitFunc, flushPeriod time.Duration) Factory { +func NewCustomFactory(splitFunc bufio.SplitFunc, trimFunc trim.Func, flushPeriod time.Duration) Factory { return &customFactory{ splitFunc: splitFunc, + trimFunc: trimFunc, flushPeriod: flushPeriod, } } // SplitFunc builds a bufio.SplitFunc based on the configuration func (f *customFactory) SplitFunc() (bufio.SplitFunc, error) { - return flush.WithPeriod(f.splitFunc, trim.Nop, f.flushPeriod), nil + return trim.WithFunc(flush.WithPeriod(f.splitFunc, f.flushPeriod), f.trimFunc), nil } diff --git a/pkg/stanza/fileconsumer/internal/splitter/custom_test.go b/pkg/stanza/fileconsumer/internal/splitter/custom_test.go index 
a03d7e0f290b..1b65b08d0097 100644 --- a/pkg/stanza/fileconsumer/internal/splitter/custom_test.go +++ b/pkg/stanza/fileconsumer/internal/splitter/custom_test.go @@ -9,10 +9,12 @@ import ( "time" "github.com/stretchr/testify/assert" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" ) func TestCustom(t *testing.T) { - factory := NewCustomFactory(bufio.ScanLines, 0) + factory := NewCustomFactory(bufio.ScanLines, trim.Nop, 0) splitFunc, err := factory.SplitFunc() assert.NoError(t, err) assert.NotNil(t, splitFunc) @@ -35,9 +37,33 @@ func TestCustom(t *testing.T) { assert.Nil(t, token) } +func TestCustomWithTrim(t *testing.T) { + factory := NewCustomFactory(bufio.ScanLines, trim.Whitespace, 0) + splitFunc, err := factory.SplitFunc() + assert.NoError(t, err) + assert.NotNil(t, splitFunc) + + input := []byte(" hello \n world \n extra ") + + advance, token, err := splitFunc(input, false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("hello"), token) + + advance, token, err = splitFunc(input[8:], false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("world"), token) + + advance, token, err = splitFunc(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 0, advance) + assert.Nil(t, token) +} + func TestCustomWithFlush(t *testing.T) { flushPeriod := 100 * time.Millisecond - factory := NewCustomFactory(bufio.ScanLines, flushPeriod) + factory := NewCustomFactory(bufio.ScanLines, trim.Nop, flushPeriod) splitFunc, err := factory.SplitFunc() assert.NoError(t, err) assert.NotNil(t, splitFunc) @@ -66,3 +92,35 @@ func TestCustomWithFlush(t *testing.T) { assert.Equal(t, 7, advance) assert.Equal(t, []byte(" extra "), token) } + +func TestCustomWithFlushTrim(t *testing.T) { + flushPeriod := 100 * time.Millisecond + factory := NewCustomFactory(bufio.ScanLines, trim.Whitespace, flushPeriod) + splitFunc, err := factory.SplitFunc() + assert.NoError(t, err) + assert.NotNil(t, splitFunc) + + input := []byte(" hello \n world \n extra ") + + advance, token, err := splitFunc(input, false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("hello"), token) + + advance, token, err = splitFunc(input[8:], false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("world"), token) + + advance, token, err = splitFunc(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 0, advance) + assert.Nil(t, token) + + time.Sleep(2 * flushPeriod) + + advance, token, err = splitFunc(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 7, advance) + assert.Equal(t, []byte("extra"), token) // Ensure trim applies to flushed token +} diff --git a/pkg/stanza/fileconsumer/internal/splitter/multiline.go b/pkg/stanza/fileconsumer/internal/splitter/multiline.go index ae770350ed80..917bf2aeddb7 100644 --- a/pkg/stanza/fileconsumer/internal/splitter/multiline.go +++ b/pkg/stanza/fileconsumer/internal/splitter/multiline.go @@ -42,9 +42,14 @@ func NewSplitFuncFactory( // SplitFunc builds a bufio.SplitFunc based on the configuration func (f *splitFuncFactory) SplitFunc() (bufio.SplitFunc, error) { - splitFunc, err := f.splitConfig.Func(f.encoding, false, f.maxLogSize, f.trimFunc) + splitFunc, err := f.splitConfig.Func(f.encoding, false, f.maxLogSize) if err != nil { return nil, err } - return flush.WithPeriod(splitFunc, f.trimFunc, f.flushPeriod), nil + splitFunc = flush.WithPeriod(splitFunc, f.flushPeriod) + if f.encoding == encoding.Nop { + // Special case where we should 
never trim + return splitFunc, nil + } + return trim.WithFunc(splitFunc, f.trimFunc), nil } diff --git a/pkg/stanza/fileconsumer/internal/splitter/multiline_test.go b/pkg/stanza/fileconsumer/internal/splitter/multiline_test.go index d0207f964762..89b773ad802d 100644 --- a/pkg/stanza/fileconsumer/internal/splitter/multiline_test.go +++ b/pkg/stanza/fileconsumer/internal/splitter/multiline_test.go @@ -49,6 +49,30 @@ func TestSplitFunc(t *testing.T) { assert.Nil(t, token) } +func TestSplitFuncWithTrim(t *testing.T) { + factory := NewSplitFuncFactory(split.Config{}, unicode.UTF8, 1024, trim.Whitespace, 0) + splitFunc, err := factory.SplitFunc() + assert.NoError(t, err) + assert.NotNil(t, splitFunc) + + input := []byte(" hello \n world \n extra ") + + advance, token, err := splitFunc(input, false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("hello"), token) + + advance, token, err = splitFunc(input[8:], false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("world"), token) + + advance, token, err = splitFunc(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 0, advance) + assert.Nil(t, token) +} + func TestSplitFuncWithFlush(t *testing.T) { flushPeriod := 100 * time.Millisecond factory := NewSplitFuncFactory(split.Config{}, unicode.UTF8, 1024, trim.Nop, flushPeriod) @@ -81,7 +105,7 @@ func TestSplitFuncWithFlush(t *testing.T) { assert.Equal(t, []byte(" extra "), token) } -func TestSplitFuncWithTrim(t *testing.T) { +func TestSplitFuncWithFlushTrim(t *testing.T) { flushPeriod := 100 * time.Millisecond factory := NewSplitFuncFactory(split.Config{}, unicode.UTF8, 1024, trim.Whitespace, flushPeriod) splitFunc, err := factory.SplitFunc() diff --git a/pkg/stanza/flush/flush.go b/pkg/stanza/flush/flush.go index f42e18c82370..4197f527972d 100644 --- a/pkg/stanza/flush/flush.go +++ b/pkg/stanza/flush/flush.go @@ -6,12 +6,10 @@ package flush // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "bufio" "time" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" ) // Wrap a bufio.SplitFunc with a flusher -func WithPeriod(splitFunc bufio.SplitFunc, trimFunc trim.Func, period time.Duration) bufio.SplitFunc { +func WithPeriod(splitFunc bufio.SplitFunc, period time.Duration) bufio.SplitFunc { if period <= 0 { return splitFunc } @@ -20,7 +18,7 @@ func WithPeriod(splitFunc bufio.SplitFunc, trimFunc trim.Func, period time.Durat forcePeriod: period, previousDataLength: 0, } - return f.splitFunc(splitFunc, trimFunc) + return f.splitFunc(splitFunc) } // flusher keeps information about flush state @@ -61,7 +59,7 @@ func (f *flusher) shouldFlush() bool { return f.forcePeriod > 0 && time.Since(f.lastDataChange) > f.forcePeriod && f.previousDataLength > 0 } -func (f *flusher) splitFunc(splitFunc bufio.SplitFunc, trimFunc trim.Func) bufio.SplitFunc { +func (f *flusher) splitFunc(splitFunc bufio.SplitFunc) bufio.SplitFunc { return func(data []byte, atEOF bool) (advance int, token []byte, err error) { advance, token, err = splitFunc(data, atEOF) @@ -81,7 +79,7 @@ func (f *flusher) splitFunc(splitFunc bufio.SplitFunc, trimFunc trim.Func) bufio if f.shouldFlush() { // Inform flusher that we just flushed f.flushed() - token = trimFunc(data) + token = data advance = len(data) return } diff --git a/pkg/stanza/flush/flush_test.go b/pkg/stanza/flush/flush_test.go index 25d3aec0212b..140308274033 100644 --- a/pkg/stanza/flush/flush_test.go +++ b/pkg/stanza/flush/flush_test.go @@ -9,8 +9,6 @@ import ( 
"time" "github.com/stretchr/testify/assert" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" ) func TestFlusher(t *testing.T) { @@ -22,7 +20,7 @@ func TestFlusher(t *testing.T) { // always use atEOF=false. flushPeriod := 100 * time.Millisecond - f := WithPeriod(bufio.ScanWords, trim.Nop, flushPeriod) + f := WithPeriod(bufio.ScanWords, flushPeriod) content := []byte("foo bar hellowo") @@ -64,7 +62,7 @@ func TestNoFlushPeriod(t *testing.T) { // In other words, we should expect exactly the behavior of bufio.ScanWords. flushPeriod := time.Duration(0) - f := WithPeriod(bufio.ScanWords, trim.Nop, flushPeriod) + f := WithPeriod(bufio.ScanWords, flushPeriod) content := []byte("foo bar hellowo") diff --git a/pkg/stanza/go.mod b/pkg/stanza/go.mod index 1d42e1c968ed..7a82cc48da04 100644 --- a/pkg/stanza/go.mod +++ b/pkg/stanza/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza go 1.20 require ( - github.com/antonmedv/expr v1.15.1 + github.com/antonmedv/expr v1.15.2 github.com/bmatcuk/doublestar/v4 v4.6.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6 diff --git a/pkg/stanza/go.sum b/pkg/stanza/go.sum index 8b0a119af10c..b8582f3d6b4d 100644 --- a/pkg/stanza/go.sum +++ b/pkg/stanza/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/pkg/stanza/operator/input/tcp/tcp.go b/pkg/stanza/operator/input/tcp/tcp.go index 2a618e7d1054..2dd553e77512 100644 --- a/pkg/stanza/operator/input/tcp/tcp.go +++ b/pkg/stanza/operator/input/tcp/tcp.go @@ -81,13 +81,8 @@ type BaseConfig struct { type SplitFuncBuilder func(enc encoding.Encoding) (bufio.SplitFunc, error) -func (c Config) defaultMultilineBuilder(enc encoding.Encoding) (bufio.SplitFunc, error) { - trimFunc := c.TrimConfig.Func() - splitFunc, err := c.SplitConfig.Func(enc, true, int(c.MaxLogSize), trimFunc) - if err != nil { - return nil, err - } - return splitFunc, nil +func (c Config) defaultSplitFuncBuilder(enc encoding.Encoding) (bufio.SplitFunc, error) { + return c.SplitConfig.Func(enc, true, int(c.MaxLogSize)) } // Build will build a tcp input operator. 
@@ -121,7 +116,7 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { } if c.SplitFuncBuilder == nil { - c.SplitFuncBuilder = c.defaultMultilineBuilder + c.SplitFuncBuilder = c.defaultSplitFuncBuilder } // Build split func @@ -129,6 +124,7 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { if err != nil { return nil, err } + splitFunc = trim.WithFunc(splitFunc, c.TrimConfig.Func()) var resolver *helper.IPResolver if c.AddAttributes { diff --git a/pkg/stanza/operator/input/udp/udp.go b/pkg/stanza/operator/input/udp/udp.go index 0bf1c78f76fe..e45ee9b8d498 100644 --- a/pkg/stanza/operator/input/udp/udp.go +++ b/pkg/stanza/operator/input/udp/udp.go @@ -89,11 +89,12 @@ func (c Config) Build(logger *zap.SugaredLogger) (operator.Operator, error) { return nil, err } - // Build SplitFunc - splitFunc, err := c.SplitConfig.Func(enc, true, MaxUDPSize, c.TrimConfig.Func()) + // Build split func + splitFunc, err := c.SplitConfig.Func(enc, true, MaxUDPSize) if err != nil { return nil, err } + splitFunc = trim.WithFunc(splitFunc, c.TrimConfig.Func()) var resolver *helper.IPResolver if c.AddAttributes { diff --git a/pkg/stanza/split/split.go b/pkg/stanza/split/split.go index 6e0282f52d28..976a70bf90f7 100644 --- a/pkg/stanza/split/split.go +++ b/pkg/stanza/split/split.go @@ -10,8 +10,6 @@ import ( "regexp" "golang.org/x/text/encoding" - - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" ) // Config is the configuration for a split func @@ -21,54 +19,44 @@ type Config struct { } // Func will return a bufio.SplitFunc based on the config -func (c Config) Func(enc encoding.Encoding, flushAtEOF bool, maxLogSize int, trimFunc trim.Func) (bufio.SplitFunc, error) { - endPattern := c.LineEndPattern - startPattern := c.LineStartPattern - - var ( - splitFunc bufio.SplitFunc - err error - ) - +func (c Config) Func(enc encoding.Encoding, flushAtEOF bool, maxLogSize int) (splitFunc bufio.SplitFunc, err error) { switch { - case endPattern != "" && startPattern != "": + case c.LineEndPattern != "" && c.LineStartPattern != "": return nil, fmt.Errorf("only one of line_start_pattern or line_end_pattern can be set") - case enc == encoding.Nop && (endPattern != "" || startPattern != ""): + case enc == encoding.Nop && (c.LineEndPattern != "" || c.LineStartPattern != ""): return nil, fmt.Errorf("line_start_pattern or line_end_pattern should not be set when using nop encoding") case enc == encoding.Nop: return NoSplitFunc(maxLogSize), nil - case endPattern == "" && startPattern == "": - splitFunc, err = NewlineSplitFunc(enc, flushAtEOF, trimFunc) + case c.LineEndPattern == "" && c.LineStartPattern == "": + splitFunc, err = NewlineSplitFunc(enc, flushAtEOF) if err != nil { return nil, err } - case endPattern != "": + case c.LineEndPattern != "": re, err := regexp.Compile("(?m)" + c.LineEndPattern) if err != nil { return nil, fmt.Errorf("compile line end regex: %w", err) } - splitFunc = LineEndSplitFunc(re, flushAtEOF, trimFunc) - case startPattern != "": + splitFunc = LineEndSplitFunc(re, flushAtEOF) + case c.LineStartPattern != "": re, err := regexp.Compile("(?m)" + c.LineStartPattern) if err != nil { return nil, fmt.Errorf("compile line start regex: %w", err) } - splitFunc = LineStartSplitFunc(re, flushAtEOF, trimFunc) + splitFunc = LineStartSplitFunc(re, flushAtEOF) } return splitFunc, nil } // LineStartSplitFunc creates a bufio.SplitFunc that splits an incoming stream into // tokens that start with a match to the regex pattern provided -func 
LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) bufio.SplitFunc { +func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { return func(data []byte, atEOF bool) (advance int, token []byte, err error) { firstLoc := re.FindIndex(data) if firstLoc == nil { // Flush if no more data is expected if len(data) != 0 && atEOF && flushAtEOF { - token = trimFunc(data) - advance = len(data) - return + return len(data), data, nil } return 0, nil, nil // read more data and try again. } @@ -78,7 +66,7 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) if firstMatchStart != 0 { // the beginning of the file does not match the start pattern, so return a token up to the first match so we don't lose data advance = firstMatchStart - token = trimFunc(data[0:firstMatchStart]) + token = data[0:firstMatchStart] // return if non-matching pattern is not only whitespaces if token != nil { @@ -93,9 +81,7 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) // Flush if no more data is expected if atEOF && flushAtEOF { - token = trimFunc(data) - advance = len(data) - return + return len(data), data, nil } secondLocOfset := firstMatchEnd + 1 @@ -105,24 +91,21 @@ func LineStartSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) } secondMatchStart := secondLoc[0] + secondLocOfset - advance = secondMatchStart // start scanning at the beginning of the second match - token = trimFunc(data[firstMatchStart:secondMatchStart]) // the token begins at the first match, and ends at the beginning of the second match - err = nil - return + // start scanning at the beginning of the second match + // the token begins at the first match, and ends at the beginning of the second match + return secondMatchStart, data[firstMatchStart:secondMatchStart], nil } } // LineEndSplitFunc creates a bufio.SplitFunc that splits an incoming stream into // tokens that end with a match to the regex pattern provided -func LineEndSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) bufio.SplitFunc { +func LineEndSplitFunc(re *regexp.Regexp, flushAtEOF bool) bufio.SplitFunc { return func(data []byte, atEOF bool) (advance int, token []byte, err error) { loc := re.FindIndex(data) if loc == nil { // Flush if no more data is expected if len(data) != 0 && atEOF && flushAtEOF { - token = trimFunc(data) - advance = len(data) - return + return len(data), data, nil } return 0, nil, nil // read more data and try again } @@ -132,17 +115,13 @@ func LineEndSplitFunc(re *regexp.Regexp, flushAtEOF bool, trimFunc trim.Func) bu if loc[1] == len(data)-1 && !atEOF { return 0, nil, nil } - - advance = loc[1] - token = trimFunc(data[:loc[1]]) - err = nil - return + return loc[1], data[:loc[1]], nil } } // NewlineSplitFunc splits log lines by newline, just as bufio.ScanLines, but // never returning an token using EOF as a terminator -func NewlineSplitFunc(enc encoding.Encoding, flushAtEOF bool, trimFunc trim.Func) (bufio.SplitFunc, error) { +func NewlineSplitFunc(enc encoding.Encoding, flushAtEOF bool) (bufio.SplitFunc, error) { newline, err := encodedNewline(enc) if err != nil { return nil, err @@ -158,17 +137,19 @@ func NewlineSplitFunc(enc encoding.Encoding, flushAtEOF bool, trimFunc trim.Func return 0, nil, nil } - if i := bytes.Index(data, newline); i >= 0 { + i := bytes.Index(data, newline) + if i == 0 { + return len(newline), []byte{}, nil + } + if i >= 0 { // We have a full newline-terminated line. 
token = bytes.TrimSuffix(data[:i], carriageReturn) - return i + len(newline), trimFunc(token), nil + return i + len(newline), token, nil } // Flush if no more data is expected if atEOF && flushAtEOF { - token = trimFunc(data) - advance = len(data) - return + return len(data), data, nil } // Request more data. diff --git a/pkg/stanza/split/split_test.go b/pkg/stanza/split/split_test.go index 372768553854..9af21034b1ca 100644 --- a/pkg/stanza/split/split_test.go +++ b/pkg/stanza/split/split_test.go @@ -24,19 +24,14 @@ func TestConfigFunc(t *testing.T) { maxLogSize := 100 t.Run("BothStartAndEnd", func(t *testing.T) { - cfg := &Config{ - LineStartPattern: "foo", - LineEndPattern: "bar", - } - - _, err := cfg.Func(unicode.UTF8, false, maxLogSize, trim.Nop) + cfg := Config{LineStartPattern: "foo", LineEndPattern: "bar"} + _, err := cfg.Func(unicode.UTF8, false, maxLogSize) assert.EqualError(t, err, "only one of line_start_pattern or line_end_pattern can be set") }) t.Run("NopEncoding", func(t *testing.T) { - cfg := &Config{} - - f, err := cfg.Func(encoding.Nop, false, maxLogSize, trim.Nop) + cfg := Config{} + f, err := cfg.Func(encoding.Nop, false, maxLogSize) assert.NoError(t, err) raw := splittest.GenerateBytes(maxLogSize * 2) @@ -47,9 +42,8 @@ func TestConfigFunc(t *testing.T) { }) t.Run("Newline", func(t *testing.T) { - cfg := &Config{} - - f, err := cfg.Func(unicode.UTF8, false, maxLogSize, trim.Nop) + cfg := Config{} + f, err := cfg.Func(unicode.UTF8, false, maxLogSize) assert.NoError(t, err) advance, token, err := f([]byte("foo\nbar\nbaz\n"), false) @@ -59,20 +53,14 @@ func TestConfigFunc(t *testing.T) { }) t.Run("InvalidStartRegex", func(t *testing.T) { - cfg := &Config{ - LineStartPattern: "[", - } - - _, err := cfg.Func(unicode.UTF8, false, maxLogSize, trim.Nop) + cfg := Config{LineStartPattern: "["} + _, err := cfg.Func(unicode.UTF8, false, maxLogSize) assert.EqualError(t, err, "compile line start regex: error parsing regexp: missing closing ]: `[`") }) t.Run("InvalidEndRegex", func(t *testing.T) { - cfg := &Config{ - LineEndPattern: "[", - } - - _, err := cfg.Func(unicode.UTF8, false, maxLogSize, trim.Nop) + cfg := Config{LineEndPattern: "["} + _, err := cfg.Func(unicode.UTF8, false, maxLogSize) assert.EqualError(t, err, "compile line end regex: error parsing regexp: missing closing ]: `[`") }) } @@ -92,8 +80,8 @@ func TestLineStartSplitFunc(t *testing.T) { Pattern: `LOGSTART \d+ `, Input: []byte(`LOGSTART 123 log1 LOGSTART 234 log2 LOGSTART 345 foo`), ExpectedTokens: []string{ - `LOGSTART 123 log1`, - `LOGSTART 234 log2`, + `LOGSTART 123 log1 `, + `LOGSTART 234 log2 `, }, }, { @@ -101,8 +89,8 @@ func TestLineStartSplitFunc(t *testing.T) { Pattern: `^LOGSTART \d+ `, Input: []byte("LOGSTART 123 LOGSTART 345 log1\nLOGSTART 234 log2\nLOGSTART 345 foo"), ExpectedTokens: []string{ - "LOGSTART 123 LOGSTART 345 log1", - "LOGSTART 234 log2", + "LOGSTART 123 LOGSTART 345 log1\n", + "LOGSTART 234 log2\n", }, }, { @@ -115,7 +103,7 @@ func TestLineStartSplitFunc(t *testing.T) { Pattern: `LOGSTART \d+ `, Input: []byte(`part that doesn't match LOGSTART 123 part that matchesLOGSTART 123 foo`), ExpectedTokens: []string{ - `part that doesn't match`, + `part that doesn't match `, `LOGSTART 123 part that matches`, }, }, @@ -161,12 +149,12 @@ func TestLineStartSplitFunc(t *testing.T) { Pattern: `^LOGSTART \d+`, Input: []byte("LOGSTART 12 log1\t \nLOGPART log1\nLOGPART log1\t \nLOGSTART 17 log2\nLOGPART log2\nanother line\nLOGSTART 43 log5"), ExpectedTokens: []string{ - "LOGSTART 12 log1\t \nLOGPART 
log1\nLOGPART log1", - "LOGSTART 17 log2\nLOGPART log2\nanother line", + "LOGSTART 12 log1\t \nLOGPART log1\nLOGPART log1\t \n", + "LOGSTART 17 log2\nLOGPART log2\nanother line\n", }, }, { - Name: "LogsWithoutFlusher", + Name: "NoMatch", Pattern: `^LOGSTART \d+`, Input: []byte("LOGPART log1\nLOGPART log1\t \n"), }, @@ -174,17 +162,13 @@ func TestLineStartSplitFunc(t *testing.T) { for _, tc := range testCases { cfg := Config{LineStartPattern: tc.Pattern} - trimFunc := trim.Config{ - PreserveLeading: tc.PreserveLeadingWhitespaces, - PreserveTrailing: tc.PreserveTrailingWhitespaces, - }.Func() - splitFunc, err := cfg.Func(unicode.UTF8, false, 0, trimFunc) + splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) } t.Run("FirstMatchHitsEndOfBuffer", func(t *testing.T) { - splitFunc := LineStartSplitFunc(regexp.MustCompile("LOGSTART"), false, trim.Nop) + splitFunc := LineStartSplitFunc(regexp.MustCompile("LOGSTART"), false) data := []byte(`LOGSTART`) t.Run("NotAtEOF", func(t *testing.T) { @@ -221,21 +205,21 @@ func TestLineStartSplitFunc(t *testing.T) { }, } for _, tc := range flushAtEOFCases { - cfg := &Config{ - LineStartPattern: `^LOGSTART \d+`, - } - splitFunc, err := cfg.Func(unicode.UTF8, true, 0, trim.Nop) + cfg := Config{LineStartPattern: `^LOGSTART \d+`} + splitFunc, err := cfg.Func(unicode.UTF8, true, 0) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) } }) + // TODO move to internal/splitter? t.Run("ApplyTrimFunc", func(t *testing.T) { cfg := Config{LineStartPattern: ` LOGSTART \d+ `} input := []byte(" LOGSTART 123 log1 LOGSTART 234 log2 LOGSTART 345 foo") - - splitTrimLeading, err := cfg.Func(unicode.UTF8, false, 0, trim.Leading) + splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) + + splitTrimLeading := trim.WithFunc(splitFunc, trim.Leading) t.Run("TrimLeading", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -243,8 +227,7 @@ func TestLineStartSplitFunc(t *testing.T) { `LOGSTART 234 log2 `, }}.Run(splitTrimLeading)) - splitTrimTrailing, err := cfg.Func(unicode.UTF8, false, 0, trim.Trailing) - require.NoError(t, err) + splitTrimTrailing := trim.WithFunc(splitFunc, trim.Trailing) t.Run("TrimTrailing", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -252,8 +235,7 @@ func TestLineStartSplitFunc(t *testing.T) { ` LOGSTART 234 log2`, }}.Run(splitTrimTrailing)) - splitTrimBoth, err := cfg.Func(unicode.UTF8, false, 0, trim.Whitespace) - require.NoError(t, err) + splitTrimBoth := trim.WithFunc(splitFunc, trim.Whitespace) t.Run("TrimBoth", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -288,7 +270,7 @@ func TestLineEndSplitFunc(t *testing.T) { Input: []byte("log1 LOGEND LOGEND\nlog2 LOGEND\n"), ExpectedTokens: []string{ "log1 LOGEND LOGEND", - "log2 LOGEND", + "\nlog2 LOGEND", }, }, { @@ -339,16 +321,16 @@ func TestLineEndSplitFunc(t *testing.T) { ExpectedError: errors.New("bufio.Scanner: token too long"), }, { - Name: "MultipleMultilineLogs", + Name: "MultiplesplitLogs", Pattern: `^LOGEND.*$`, Input: []byte("LOGSTART 12 log1\t \nLOGPART log1\nLOGEND log1\t \nLOGSTART 17 log2\nLOGPART log2\nLOGEND log2\nLOGSTART 43 log5"), ExpectedTokens: []string{ - "LOGSTART 12 log1\t \nLOGPART log1\nLOGEND log1", - "LOGSTART 17 log2\nLOGPART log2\nLOGEND log2", + "LOGSTART 12 log1\t \nLOGPART log1\nLOGEND log1\t ", + "\nLOGSTART 17 log2\nLOGPART log2\nLOGEND log2", }, }, { - Name: "LogsWithoutFlusher", + Name: "NoMatch", Pattern: `^LOGEND.*$`, Input: []byte("LOGPART 
log1\nLOGPART log1\t \n"), }, @@ -356,21 +338,14 @@ func TestLineEndSplitFunc(t *testing.T) { for _, tc := range testCases { cfg := Config{LineEndPattern: tc.Pattern} - - trimFunc := trim.Config{ - PreserveLeading: tc.PreserveLeadingWhitespaces, - PreserveTrailing: tc.PreserveTrailingWhitespaces, - }.Func() - splitFunc, err := cfg.Func(unicode.UTF8, false, 0, trimFunc) + splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) } t.Run("FlushAtEOF", func(t *testing.T) { - cfg := &Config{ - LineEndPattern: `^LOGSTART \d+`, - } - splitFunc, err := cfg.Func(unicode.UTF8, true, 0, trim.Nop) + cfg := Config{LineEndPattern: `^LOGSTART \d+`} + splitFunc, err := cfg.Func(unicode.UTF8, true, 0) require.NoError(t, err) splittest.TestCase{ Name: "NoMatch", @@ -379,12 +354,14 @@ func TestLineEndSplitFunc(t *testing.T) { }.Run(splitFunc)(t) }) + // TODO move to internal/splitter? t.Run("ApplyTrimFunc", func(t *testing.T) { cfg := Config{LineEndPattern: ` LOGEND `} input := []byte(" log1 LOGEND log2 LOGEND ") - - splitTrimLeading, err := cfg.Func(unicode.UTF8, false, 0, trim.Leading) + splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) + + splitTrimLeading := trim.WithFunc(splitFunc, trim.Leading) t.Run("TrimLeading", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -392,8 +369,7 @@ func TestLineEndSplitFunc(t *testing.T) { `log2 LOGEND `, }}.Run(splitTrimLeading)) - splitTrimTrailing, err := cfg.Func(unicode.UTF8, false, 0, trim.Trailing) - require.NoError(t, err) + splitTrimTrailing := trim.WithFunc(splitFunc, trim.Trailing) t.Run("TrimTrailing", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -401,8 +377,7 @@ func TestLineEndSplitFunc(t *testing.T) { ` log2 LOGEND`, }}.Run(splitTrimTrailing)) - splitTrimBoth, err := cfg.Func(unicode.UTF8, false, 0, trim.Whitespace) - require.NoError(t, err) + splitTrimBoth := trim.WithFunc(splitFunc, trim.Whitespace) t.Run("TrimBoth", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -484,7 +459,7 @@ func TestNewlineSplitFunc(t *testing.T) { Input: []byte("LOGPART log1"), }, { - Name: "DefaultFlusherSplits", + Name: "DefaultSplits", Input: []byte("log1\nlog2\n"), ExpectedTokens: []string{ "log1", @@ -499,48 +474,16 @@ func TestNewlineSplitFunc(t *testing.T) { "LOGEND 333", }, }, - { - Name: "PreserveLeadingWhitespaces", - Input: []byte("\n LOGEND 333 \nAnother one "), - ExpectedTokens: []string{ - "", - " LOGEND 333", - }, - PreserveLeadingWhitespaces: true, - }, - { - Name: "PreserveTrailingWhitespaces", - Input: []byte("\n LOGEND 333 \nAnother one "), - ExpectedTokens: []string{ - "", - "LOGEND 333 ", - }, - PreserveTrailingWhitespaces: true, - }, - { - Name: "PreserveBothLeadingAndTrailingWhitespaces", - Input: []byte("\n LOGEND 333 \nAnother one "), - ExpectedTokens: []string{ - "", - " LOGEND 333 ", - }, - PreserveLeadingWhitespaces: true, - PreserveTrailingWhitespaces: true, - }, } for _, tc := range testCases { - trimFunc := trim.Config{ - PreserveLeading: tc.PreserveLeadingWhitespaces, - PreserveTrailing: tc.PreserveTrailingWhitespaces, - }.Func() - splitFunc, err := NewlineSplitFunc(unicode.UTF8, false, trimFunc) + splitFunc, err := NewlineSplitFunc(unicode.UTF8, false) require.NoError(t, err) t.Run(tc.Name, tc.Run(splitFunc)) } t.Run("FlushAtEOF", func(t *testing.T) { - splitFunc, err := Config{}.Func(unicode.UTF8, true, 0, trim.Nop) + splitFunc, err := Config{}.Func(unicode.UTF8, true, 0) require.NoError(t, err) splittest.TestCase{ Name: 
"FlushAtEOF", @@ -549,12 +492,14 @@ func TestNewlineSplitFunc(t *testing.T) { }.Run(splitFunc)(t) }) + // // TODO move to internal/splitter? t.Run("ApplyTrimFunc", func(t *testing.T) { - cfg := &Config{} + cfg := Config{} input := []byte(" log1 \n log2 \n") - - splitTrimLeading, err := cfg.Func(unicode.UTF8, false, 0, trim.Leading) + splitFunc, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) + + splitTrimLeading := trim.WithFunc(splitFunc, trim.Leading) t.Run("TrimLeading", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -562,8 +507,7 @@ func TestNewlineSplitFunc(t *testing.T) { `log2 `, }}.Run(splitTrimLeading)) - splitTrimTrailing, err := cfg.Func(unicode.UTF8, false, 0, trim.Trailing) - require.NoError(t, err) + splitTrimTrailing := trim.WithFunc(splitFunc, trim.Trailing) t.Run("TrimTrailing", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -571,8 +515,9 @@ func TestNewlineSplitFunc(t *testing.T) { ` log2`, }}.Run(splitTrimTrailing)) - splitTrimBoth, err := cfg.Func(unicode.UTF8, false, 0, trim.Whitespace) + splitTrimBoth, err := cfg.Func(unicode.UTF8, false, 0) require.NoError(t, err) + splitTrimBoth = trim.WithFunc(splitTrimBoth, trim.Whitespace) t.Run("TrimBoth", splittest.TestCase{ Input: input, ExpectedTokens: []string{ @@ -645,13 +590,11 @@ func TestNoSplitFunc(t *testing.T) { func TestNoopEncodingError(t *testing.T) { endCfg := Config{LineEndPattern: "\n"} - - _, err := endCfg.Func(encoding.Nop, false, 0, trim.Nop) + _, err := endCfg.Func(encoding.Nop, false, 0) require.Equal(t, err, fmt.Errorf("line_start_pattern or line_end_pattern should not be set when using nop encoding")) startCfg := Config{LineStartPattern: "\n"} - - _, err = startCfg.Func(encoding.Nop, false, 0, trim.Nop) + _, err = startCfg.Func(encoding.Nop, false, 0) require.Equal(t, err, fmt.Errorf("line_start_pattern or line_end_pattern should not be set when using nop encoding")) } @@ -712,7 +655,7 @@ func TestNewlineSplitFunc_Encodings(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - splitFunc, err := NewlineSplitFunc(tc.encoding, false, trim.Nop) + splitFunc, err := NewlineSplitFunc(tc.encoding, false) require.NoError(t, err) scanner := bufio.NewScanner(bytes.NewReader(tc.input)) scanner.Split(splitFunc) diff --git a/pkg/stanza/split/splittest/splittest.go b/pkg/stanza/split/splittest/splittest.go index 1ea3ebc2c3ca..b784b1b68dec 100644 --- a/pkg/stanza/split/splittest/splittest.go +++ b/pkg/stanza/split/splittest/splittest.go @@ -71,18 +71,16 @@ func (r *testReader) splitFunc(split bufio.SplitFunc) bufio.SplitFunc { } type TestCase struct { - Name string - Pattern string - Input []byte - ExpectedTokens []string - ExpectedError error - Sleep time.Duration - AdditionalIterations int - PreserveLeadingWhitespaces bool - PreserveTrailingWhitespaces bool + Name string + Pattern string + Input []byte + ExpectedTokens []string + ExpectedError error + Sleep time.Duration + AdditionalIterations int } -func (tc TestCase) Run(split bufio.SplitFunc) func(t *testing.T) { +func (tc TestCase) Run(splitFunc bufio.SplitFunc) func(t *testing.T) { reader := newTestReader(tc.Input) return func(t *testing.T) { @@ -94,7 +92,7 @@ func (tc TestCase) Run(split bufio.SplitFunc) func(t *testing.T) { } reader.Reset() scanner := bufio.NewScanner(reader) - scanner.Split(reader.splitFunc(split)) + scanner.Split(reader.splitFunc(splitFunc)) for { ok := scanner.Scan() if !ok { diff --git a/pkg/stanza/trim/trim.go b/pkg/stanza/trim/trim.go index 
90118dfc6543..20e48e3fe3da 100644 --- a/pkg/stanza/trim/trim.go +++ b/pkg/stanza/trim/trim.go @@ -4,11 +4,22 @@ package trim // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/trim" import ( + "bufio" "bytes" ) type Func func([]byte) []byte +func WithFunc(splitFunc bufio.SplitFunc, trimFunc Func) bufio.SplitFunc { + if trimFunc == nil { + return splitFunc + } + return func(data []byte, atEOF bool) (advance int, token []byte, err error) { + advance, token, err = splitFunc(data, atEOF) + return advance, trimFunc(token), err + } +} + type Config struct { PreserveLeading bool `mapstructure:"preserve_leading_whitespaces,omitempty"` PreserveTrailing bool `mapstructure:"preserve_trailing_whitespaces,omitempty"` @@ -27,27 +38,24 @@ func (c Config) Func() Func { return Whitespace } -func Nop(token []byte) []byte { +var Nop Func = func(token []byte) []byte { return token } -func Leading(data []byte) []byte { - // TrimLeft to strip EOF whitespaces in case of using $ in regex - // For some reason newline and carriage return are being moved to beginning of next log +var Leading Func = func(data []byte) []byte { token := bytes.TrimLeft(data, "\r\n\t ") - - // TrimLeft will return nil if data is an empty slice if token == nil { - return []byte{} + // TrimLeft sometimes overwrites something with nothing. + // We need to override this behavior in order to preserve empty tokens. + return data } return token } -func Trailing(data []byte) []byte { - // TrimRight to strip all whitespaces from the end of log +var Trailing Func = func(data []byte) []byte { return bytes.TrimRight(data, "\r\n\t ") } -func Whitespace(data []byte) []byte { +var Whitespace Func = func(data []byte) []byte { return Leading(Trailing(data)) } diff --git a/pkg/stanza/trim/trim_test.go b/pkg/stanza/trim/trim_test.go index 114a645853ee..5db6cf44f77f 100644 --- a/pkg/stanza/trim/trim_test.go +++ b/pkg/stanza/trim/trim_test.go @@ -4,6 +4,7 @@ package trim import ( + "bufio" "testing" "github.com/stretchr/testify/assert" @@ -15,36 +16,50 @@ func TestTrim(t *testing.T) { name string preserveLeading bool preserveTrailing bool - input string - expect string + input []byte + expect []byte }{ { name: "preserve both", preserveLeading: true, preserveTrailing: true, - input: " hello world ", - expect: " hello world ", + input: []byte(" hello world "), + expect: []byte(" hello world "), }, { name: "preserve leading", preserveLeading: true, preserveTrailing: false, - input: " hello world ", - expect: " hello world", + input: []byte(" hello world "), + expect: []byte(" hello world"), }, { name: "preserve trailing", preserveLeading: false, preserveTrailing: true, - input: " hello world ", - expect: "hello world ", + input: []byte(" hello world "), + expect: []byte("hello world "), }, { name: "preserve neither", preserveLeading: false, preserveTrailing: false, - input: " hello world ", - expect: "hello world", + input: []byte(" hello world "), + expect: []byte("hello world"), + }, + { + name: "trim leading returns nil when given nil", + preserveLeading: false, + preserveTrailing: true, + input: nil, + expect: nil, + }, + { + name: "trim leading returns []byte when given []byte", + preserveLeading: false, + preserveTrailing: true, + input: []byte{}, + expect: []byte{}, }, } @@ -54,10 +69,55 @@ func TestTrim(t *testing.T) { PreserveLeading: tc.preserveLeading, PreserveTrailing: tc.preserveTrailing, }.Func() - assert.Equal(t, []byte(tc.expect), trimFunc([]byte(tc.input))) - - // Also test that regardless of configuration, an empty 
[]byte in gives an empty []byte out - assert.Equal(t, []byte{}, trimFunc([]byte{})) + assert.Equal(t, tc.expect, trimFunc(tc.input)) }) } } + +func TestWithFunc(t *testing.T) { + scanAndTrimLines := WithFunc(bufio.ScanLines, Config{ + PreserveLeading: false, + PreserveTrailing: false, + }.Func()) + + input := []byte(" hello \n world \n extra ") + + advance, token, err := scanAndTrimLines(input, false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("hello"), token) + + advance, token, err = scanAndTrimLines(input[8:], false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte("world"), token) + + advance, token, err = scanAndTrimLines(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 0, advance) + assert.Nil(t, token) +} + +func TestWithNilTrimFunc(t *testing.T) { + // Same test as above, but pass nil instead of a trim func + // In other words, we should expect exactly the behavior of bufio.ScanLines. + + scanLines := WithFunc(bufio.ScanLines, nil) + + input := []byte(" hello \n world \n extra ") + + advance, token, err := scanLines(input, false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte(" hello "), token) + + advance, token, err = scanLines(input[8:], false) + assert.NoError(t, err) + assert.Equal(t, 8, advance) + assert.Equal(t, []byte(" world "), token) + + advance, token, err = scanLines(input[16:], false) + assert.NoError(t, err) + assert.Equal(t, 0, advance) + assert.Nil(t, token) +} diff --git a/processor/attributesprocessor/go.mod b/processor/attributesprocessor/go.mod index f96a336ae800..53704ac51703 100644 --- a/processor/attributesprocessor/go.mod +++ b/processor/attributesprocessor/go.mod @@ -19,7 +19,7 @@ require ( require ( github.com/alecthomas/participle/v2 v2.1.0 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gobwas/glob v0.2.3 // indirect diff --git a/processor/attributesprocessor/go.sum b/processor/attributesprocessor/go.sum index 0299d2eab53b..7d6c4b14a1cf 100644 --- a/processor/attributesprocessor/go.sum +++ b/processor/attributesprocessor/go.sum @@ -11,8 +11,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/processor/filterprocessor/go.mod b/processor/filterprocessor/go.mod index 18ebaa521d3f..f1f8dc92ef57 100644 --- 
a/processor/filterprocessor/go.mod +++ b/processor/filterprocessor/go.mod @@ -18,7 +18,7 @@ require ( require ( github.com/alecthomas/participle/v2 v2.1.0 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gobwas/glob v0.2.3 // indirect diff --git a/processor/filterprocessor/go.sum b/processor/filterprocessor/go.sum index 0299d2eab53b..7d6c4b14a1cf 100644 --- a/processor/filterprocessor/go.sum +++ b/processor/filterprocessor/go.sum @@ -11,8 +11,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/processor/logstransformprocessor/go.mod b/processor/logstransformprocessor/go.mod index 9cde1c298e63..5cce74379fe1 100644 --- a/processor/logstransformprocessor/go.mod +++ b/processor/logstransformprocessor/go.mod @@ -17,7 +17,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/processor/logstransformprocessor/go.sum b/processor/logstransformprocessor/go.sum index 5cb31791432e..70d8cacfdca0 100644 --- a/processor/logstransformprocessor/go.sum +++ b/processor/logstransformprocessor/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go index 7c0588dfa212..6eea0a12012d 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go +++ b/processor/transformprocessor/internal/metrics/func_convert_gauge_to_sum.go @@ -14,8 +14,8 @@ import ( ) type convertGaugeToSumArguments struct { - StringAggTemp string `ottlarg:"0"` - Monotonic bool `ottlarg:"1"` + StringAggTemp string + Monotonic bool } func newConvertGaugeToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { diff --git a/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go b/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go index 3344e02c6992..dfd5bc550bd1 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go +++ b/processor/transformprocessor/internal/metrics/func_convert_summary_count_val_to_sum.go @@ -14,8 +14,8 @@ import ( ) type convertSummaryCountValToSumArguments struct { - StringAggTemp string `ottlarg:"0"` - Monotonic bool `ottlarg:"1"` + StringAggTemp string + Monotonic bool } func newConvertSummaryCountValToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { diff --git a/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go b/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go index 1ec5f767e475..d5557a87b415 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go +++ b/processor/transformprocessor/internal/metrics/func_convert_summary_sum_val_to_sum.go @@ -14,8 +14,8 @@ import ( ) type convertSummarySumValToSumArguments struct { - StringAggTemp string `ottlarg:"0"` - Monotonic bool `ottlarg:"1"` + StringAggTemp string + Monotonic bool } func newConvertSummarySumValToSumFactory() ottl.Factory[ottldatapoint.TransformContext] { diff --git a/processor/transformprocessor/internal/metrics/func_extract_count_metric.go b/processor/transformprocessor/internal/metrics/func_extract_count_metric.go index 6199618489b1..b10dfc4ff873 100644 --- a/processor/transformprocessor/internal/metrics/func_extract_count_metric.go +++ b/processor/transformprocessor/internal/metrics/func_extract_count_metric.go @@ -14,7 +14,7 @@ import ( ) type extractCountMetricArguments struct { - Monotonic bool `ottlarg:"0"` + Monotonic bool } func newExtractCountMetricFactory() ottl.Factory[ottlmetric.TransformContext] { diff --git a/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go b/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go index 68122c4f471f..63cd74d067ee 100644 --- a/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go +++ b/processor/transformprocessor/internal/metrics/func_extract_sum_metric.go @@ -15,7 +15,7 @@ import ( ) type extractSumMetricArguments struct { - Monotonic bool `ottlarg:"0"` + Monotonic bool } func newExtractSumMetricFactory() ottl.Factory[ottlmetric.TransformContext] { diff --git a/receiver/azureeventhubreceiver/go.mod b/receiver/azureeventhubreceiver/go.mod index d4dfb6abe0d3..a173ad3153f7 100644 --- a/receiver/azureeventhubreceiver/go.mod +++ b/receiver/azureeventhubreceiver/go.mod @@ -33,7 +33,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect 
github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/receiver/azureeventhubreceiver/go.sum b/receiver/azureeventhubreceiver/go.sum index b4ee0dfd46e8..b8f963c4e5fb 100644 --- a/receiver/azureeventhubreceiver/go.sum +++ b/receiver/azureeventhubreceiver/go.sum @@ -72,8 +72,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/filelogreceiver/go.mod b/receiver/filelogreceiver/go.mod index e3c48ce3a913..d519c7d8ca62 100644 --- a/receiver/filelogreceiver/go.mod +++ b/receiver/filelogreceiver/go.mod @@ -17,7 +17,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/receiver/filelogreceiver/go.sum b/receiver/filelogreceiver/go.sum index 9fe0cca70abf..0751912646c7 100644 --- a/receiver/filelogreceiver/go.sum +++ b/receiver/filelogreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/journaldreceiver/go.mod b/receiver/journaldreceiver/go.mod index df49e07513db..e58f215d827c 100644 --- 
a/receiver/journaldreceiver/go.mod +++ b/receiver/journaldreceiver/go.mod @@ -13,7 +13,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/journaldreceiver/go.sum b/receiver/journaldreceiver/go.sum index 5cb31791432e..70d8cacfdca0 100644 --- a/receiver/journaldreceiver/go.sum +++ b/receiver/journaldreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/mongodbatlasreceiver/go.mod b/receiver/mongodbatlasreceiver/go.mod index 1ae5b7bf99a1..369753b517fc 100644 --- a/receiver/mongodbatlasreceiver/go.mod +++ b/receiver/mongodbatlasreceiver/go.mod @@ -26,7 +26,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/mongodbatlasreceiver/go.sum b/receiver/mongodbatlasreceiver/go.sum index a19220cc216e..54ed1f5df33e 100644 --- a/receiver/mongodbatlasreceiver/go.sum +++ b/receiver/mongodbatlasreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/otlpjsonfilereceiver/go.mod 
b/receiver/otlpjsonfilereceiver/go.mod index 67ce16537106..3dc8a79863b2 100644 --- a/receiver/otlpjsonfilereceiver/go.mod +++ b/receiver/otlpjsonfilereceiver/go.mod @@ -15,7 +15,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/receiver/otlpjsonfilereceiver/go.sum b/receiver/otlpjsonfilereceiver/go.sum index 8d0cce512c51..c06b24015ace 100644 --- a/receiver/otlpjsonfilereceiver/go.sum +++ b/receiver/otlpjsonfilereceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/receivercreator/go.mod b/receiver/receivercreator/go.mod index 0ea5677ad4a6..0bda1bb1ea2e 100644 --- a/receiver/receivercreator/go.mod +++ b/receiver/receivercreator/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receiv go 1.20 require ( - github.com/antonmedv/expr v1.15.1 + github.com/antonmedv/expr v1.15.2 github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.85.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.85.0 diff --git a/receiver/receivercreator/go.sum b/receiver/receivercreator/go.sum index f5569a83347c..f7d231c46a0c 100644 --- a/receiver/receivercreator/go.sum +++ b/receiver/receivercreator/go.sum @@ -42,8 +42,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/redisreceiver/documentation.md b/receiver/redisreceiver/documentation.md index 4483a9b8d8af..759d107267f6 100644 --- a/receiver/redisreceiver/documentation.md +++ b/receiver/redisreceiver/documentation.md @@ -292,6 +292,21 @@ Total number of calls for a command | ---- | ----------- | ------ | | cmd | Redis command name | Any Str | +### redis.cmd.latency + +Command execution latency + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| us | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| cmd | Redis command name | Any Str | +| percentile | Percentile | Str: ``p50``, ``p99``, ``p99.9`` | + ### redis.cmd.usec Total time for all executions of this command diff --git a/receiver/redisreceiver/internal/metadata/generated_config.go b/receiver/redisreceiver/internal/metadata/generated_config.go index 8fa7b783bf7e..cea09b3550e9 100644 --- a/receiver/redisreceiver/internal/metadata/generated_config.go +++ b/receiver/redisreceiver/internal/metadata/generated_config.go @@ -30,6 +30,7 @@ type MetricsConfig struct { RedisClientsMaxInputBuffer MetricConfig `mapstructure:"redis.clients.max_input_buffer"` RedisClientsMaxOutputBuffer MetricConfig `mapstructure:"redis.clients.max_output_buffer"` RedisCmdCalls MetricConfig `mapstructure:"redis.cmd.calls"` + RedisCmdLatency MetricConfig `mapstructure:"redis.cmd.latency"` RedisCmdUsec MetricConfig `mapstructure:"redis.cmd.usec"` RedisCommands MetricConfig `mapstructure:"redis.commands"` RedisCommandsProcessed MetricConfig `mapstructure:"redis.commands.processed"` @@ -77,6 +78,9 @@ func DefaultMetricsConfig() MetricsConfig { RedisCmdCalls: MetricConfig{ Enabled: false, }, + RedisCmdLatency: MetricConfig{ + Enabled: false, + }, RedisCmdUsec: MetricConfig{ Enabled: false, }, diff --git a/receiver/redisreceiver/internal/metadata/generated_config_test.go b/receiver/redisreceiver/internal/metadata/generated_config_test.go index c3a9e64ff4f4..f0043cd73831 100644 --- a/receiver/redisreceiver/internal/metadata/generated_config_test.go +++ b/receiver/redisreceiver/internal/metadata/generated_config_test.go @@ -31,6 +31,7 @@ func TestMetricsBuilderConfig(t *testing.T) { RedisClientsMaxInputBuffer: MetricConfig{Enabled: true}, RedisClientsMaxOutputBuffer: MetricConfig{Enabled: true}, RedisCmdCalls: MetricConfig{Enabled: true}, + RedisCmdLatency: MetricConfig{Enabled: true}, RedisCmdUsec: MetricConfig{Enabled: true}, RedisCommands: MetricConfig{Enabled: true}, RedisCommandsProcessed: MetricConfig{Enabled: true}, @@ -74,6 +75,7 @@ func TestMetricsBuilderConfig(t *testing.T) { RedisClientsMaxInputBuffer: MetricConfig{Enabled: false}, RedisClientsMaxOutputBuffer: MetricConfig{Enabled: false}, RedisCmdCalls: MetricConfig{Enabled: false}, + RedisCmdLatency: MetricConfig{Enabled: false}, RedisCmdUsec: MetricConfig{Enabled: false}, RedisCommands: MetricConfig{Enabled: false}, RedisCommandsProcessed: MetricConfig{Enabled: false}, diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics.go b/receiver/redisreceiver/internal/metadata/generated_metrics.go index a225a72d0a3a..5ec5d6383432 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics.go @@ -11,6 +11,36 @@ import ( 
"go.opentelemetry.io/collector/receiver" ) +// AttributePercentile specifies the a value percentile attribute. +type AttributePercentile int + +const ( + _ AttributePercentile = iota + AttributePercentileP50 + AttributePercentileP99 + AttributePercentileP999 +) + +// String returns the string representation of the AttributePercentile. +func (av AttributePercentile) String() string { + switch av { + case AttributePercentileP50: + return "p50" + case AttributePercentileP99: + return "p99" + case AttributePercentileP999: + return "p99.9" + } + return "" +} + +// MapAttributePercentile is a helper map of string to AttributePercentile attribute value. +var MapAttributePercentile = map[string]AttributePercentile{ + "p50": AttributePercentileP50, + "p99": AttributePercentileP99, + "p99.9": AttributePercentileP999, +} + // AttributeRole specifies the a value role attribute. type AttributeRole int @@ -332,6 +362,58 @@ func newMetricRedisCmdCalls(cfg MetricConfig) metricRedisCmdCalls { return m } +type metricRedisCmdLatency struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills redis.cmd.latency metric with initial data. +func (m *metricRedisCmdLatency) init() { + m.data.SetName("redis.cmd.latency") + m.data.SetDescription("Command execution latency") + m.data.SetUnit("us") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricRedisCmdLatency) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cmdAttributeValue string, percentileAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("cmd", cmdAttributeValue) + dp.Attributes().PutStr("percentile", percentileAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRedisCmdLatency) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricRedisCmdLatency) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRedisCmdLatency(cfg MetricConfig) metricRedisCmdLatency { + m := metricRedisCmdLatency{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricRedisCmdUsec struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1759,6 +1841,7 @@ type MetricsBuilder struct { metricRedisClientsMaxInputBuffer metricRedisClientsMaxInputBuffer metricRedisClientsMaxOutputBuffer metricRedisClientsMaxOutputBuffer metricRedisCmdCalls metricRedisCmdCalls + metricRedisCmdLatency metricRedisCmdLatency metricRedisCmdUsec metricRedisCmdUsec metricRedisCommands metricRedisCommands metricRedisCommandsProcessed metricRedisCommandsProcessed @@ -1810,6 +1893,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricRedisClientsMaxInputBuffer: newMetricRedisClientsMaxInputBuffer(mbc.Metrics.RedisClientsMaxInputBuffer), metricRedisClientsMaxOutputBuffer: newMetricRedisClientsMaxOutputBuffer(mbc.Metrics.RedisClientsMaxOutputBuffer), metricRedisCmdCalls: newMetricRedisCmdCalls(mbc.Metrics.RedisCmdCalls), + metricRedisCmdLatency: newMetricRedisCmdLatency(mbc.Metrics.RedisCmdLatency), metricRedisCmdUsec: newMetricRedisCmdUsec(mbc.Metrics.RedisCmdUsec), metricRedisCommands: newMetricRedisCommands(mbc.Metrics.RedisCommands), metricRedisCommandsProcessed: newMetricRedisCommandsProcessed(mbc.Metrics.RedisCommandsProcessed), @@ -1904,6 +1988,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricRedisClientsMaxInputBuffer.emit(ils.Metrics()) mb.metricRedisClientsMaxOutputBuffer.emit(ils.Metrics()) mb.metricRedisCmdCalls.emit(ils.Metrics()) + mb.metricRedisCmdLatency.emit(ils.Metrics()) mb.metricRedisCmdUsec.emit(ils.Metrics()) mb.metricRedisCommands.emit(ils.Metrics()) mb.metricRedisCommandsProcessed.emit(ils.Metrics()) @@ -1977,6 +2062,11 @@ func (mb *MetricsBuilder) RecordRedisCmdCallsDataPoint(ts pcommon.Timestamp, val mb.metricRedisCmdCalls.recordDataPoint(mb.startTime, ts, val, cmdAttributeValue) } +// RecordRedisCmdLatencyDataPoint adds a data point to redis.cmd.latency metric. +func (mb *MetricsBuilder) RecordRedisCmdLatencyDataPoint(ts pcommon.Timestamp, val float64, cmdAttributeValue string, percentileAttributeValue AttributePercentile) { + mb.metricRedisCmdLatency.recordDataPoint(mb.startTime, ts, val, cmdAttributeValue, percentileAttributeValue.String()) +} + // RecordRedisCmdUsecDataPoint adds a data point to redis.cmd.usec metric. 
func (mb *MetricsBuilder) RecordRedisCmdUsecDataPoint(ts pcommon.Timestamp, val int64, cmdAttributeValue string) { mb.metricRedisCmdUsec.recordDataPoint(mb.startTime, ts, val, cmdAttributeValue) diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go index 889d9c395250..683a075347e3 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics_test.go @@ -73,6 +73,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordRedisCmdCallsDataPoint(ts, 1, "cmd-val") + allMetricsCount++ + mb.RecordRedisCmdLatencyDataPoint(ts, 1, "cmd-val", AttributePercentileP50) + allMetricsCount++ mb.RecordRedisCmdUsecDataPoint(ts, 1, "cmd-val") @@ -275,6 +278,24 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("cmd") assert.True(t, ok) assert.EqualValues(t, "cmd-val", attrVal.Str()) + case "redis.cmd.latency": + assert.False(t, validatedMetrics["redis.cmd.latency"], "Found a duplicate in the metrics slice: redis.cmd.latency") + validatedMetrics["redis.cmd.latency"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Command execution latency", ms.At(i).Description()) + assert.Equal(t, "us", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("cmd") + assert.True(t, ok) + assert.EqualValues(t, "cmd-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("percentile") + assert.True(t, ok) + assert.EqualValues(t, "p50", attrVal.Str()) case "redis.cmd.usec": assert.False(t, validatedMetrics["redis.cmd.usec"], "Found a duplicate in the metrics slice: redis.cmd.usec") validatedMetrics["redis.cmd.usec"] = true diff --git a/receiver/redisreceiver/internal/metadata/testdata/config.yaml b/receiver/redisreceiver/internal/metadata/testdata/config.yaml index d546d3e12fe3..33972c5b607c 100644 --- a/receiver/redisreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/redisreceiver/internal/metadata/testdata/config.yaml @@ -11,6 +11,8 @@ all_set: enabled: true redis.cmd.calls: enabled: true + redis.cmd.latency: + enabled: true redis.cmd.usec: enabled: true redis.commands: @@ -82,6 +84,8 @@ none_set: enabled: false redis.cmd.calls: enabled: false + redis.cmd.latency: + enabled: false redis.cmd.usec: enabled: false redis.commands: diff --git a/receiver/redisreceiver/latencystats.go b/receiver/redisreceiver/latencystats.go new file mode 100644 index 000000000000..fdfecc08a1b6 --- /dev/null +++ b/receiver/redisreceiver/latencystats.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package redisreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver" + +import ( + "fmt" + "strconv" + "strings" +) + +// Holds percentile latencies, e.g. "p99" -> 1.5. +type latencies map[string]float64 + +// parseLatencyStats parses the values part of one entry in Redis latencystats section, +// e.g. "p50=181.247,p99=309.247,p99.9=1023.999". 
+func parseLatencyStats(str string) (latencies, error) { + res := make(latencies) + + pairs := strings.Split(strings.TrimSpace(str), ",") + + for _, pairStr := range pairs { + pair := strings.Split(pairStr, "=") + if len(pair) != 2 { + return nil, fmt.Errorf("unexpected latency percentiles pair '%s'", pairStr) + } + + key := pair[0] + valueStr := pair[1] + + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + return nil, err + } + + res[key] = value + } + + return res, nil +} diff --git a/receiver/redisreceiver/latencystats_test.go b/receiver/redisreceiver/latencystats_test.go new file mode 100644 index 000000000000..31b7930a7e8f --- /dev/null +++ b/receiver/redisreceiver/latencystats_test.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package redisreceiver + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseLatencyStats(t *testing.T) { + ls, err := parseLatencyStats("p50=181.247,p55=182.271,p99=309.247,p99.9=1023.999") + require.Nil(t, err) + require.Equal(t, ls["p50"], 181.247) + require.Equal(t, ls["p55"], 182.271) + require.Equal(t, ls["p99"], 309.247) + require.Equal(t, ls["p99.9"], 1023.999) +} + +func TestParseMalformedLatencyStats(t *testing.T) { + tests := []struct{ name, stats string }{ + {"missing value", "p50=42.0,p90=50.0,p99.9="}, + {"missing equals", "p50=42.0,p90=50.0,p99.9"}, + {"extra comma", "p50=42.0,,p90=50.0"}, + {"wrong value type", "p50=asdf"}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := parseLatencyStats(test.stats) + require.NotNil(t, err) + }) + } +} diff --git a/receiver/redisreceiver/metadata.yaml b/receiver/redisreceiver/metadata.yaml index 64628fb3ca75..75e696aa7ac0 100644 --- a/receiver/redisreceiver/metadata.yaml +++ b/receiver/redisreceiver/metadata.yaml @@ -45,6 +45,13 @@ attributes: cmd: description: Redis command name type: string + percentile: + description: Percentile + type: string + enum: + - p50 + - p99 + - p99.9 metrics: redis.maxmemory: @@ -84,6 +91,14 @@ metrics: aggregation_temporality: cumulative attributes: [cmd] + redis.cmd.latency: + enabled: false + description: Command execution latency + unit: us + gauge: + value_type: double + attributes: [cmd, percentile] + redis.uptime: enabled: true description: Number of seconds since Redis server start diff --git a/receiver/redisreceiver/redis_scraper.go b/receiver/redisreceiver/redis_scraper.go index efe0a0c8e433..8d242fca729d 100644 --- a/receiver/redisreceiver/redis_scraper.go +++ b/receiver/redisreceiver/redis_scraper.go @@ -93,7 +93,7 @@ func (rs *redisScraper) Scrape(context.Context) (pmetric.Metrics, error) { rs.recordCommonMetrics(now, inf) rs.recordKeyspaceMetrics(now, inf) rs.recordRoleMetrics(now, inf) - rs.recordCmdStatsMetrics(now, inf) + rs.recordCmdMetrics(now, inf) rb := rs.mb.NewResourceBuilder() rb.SetRedisVersion(rs.getRedisVersion(inf)) return rs.mb.Emit(metadata.WithResource(rb.Emit())), nil @@ -169,32 +169,58 @@ func (rs *redisScraper) recordRoleMetrics(ts pcommon.Timestamp, inf info) { } } -// recordCmdStatsMetrics records metrics from 'command_stats' Redis info key-value pairs -// e.g. "cmdstat_mget:calls=1685,usec=6032,usec_per_call=3.58,rejected_calls=0,failed_calls=0" -// but only calls and usec at the moment. -func (rs *redisScraper) recordCmdStatsMetrics(ts pcommon.Timestamp, inf info) { - cmdPrefix := "cmdstat_" +// recordCmdMetrics records per-command metrics from Redis info. 
+// These include command stats and command latency percentiles. +// Examples: +// +// "cmdstat_mget:calls=1685,usec=6032,usec_per_call=3.58,rejected_calls=0,failed_calls=0" +// "latency_percentiles_usec_lastsave:p50=1.003,p99=1.003,p99.9=1.003" +func (rs *redisScraper) recordCmdMetrics(ts pcommon.Timestamp, inf info) { + const cmdstatPrefix = "cmdstat_" + const latencyPrefix = "latency_percentiles_usec_" + for key, val := range inf { - if !strings.HasPrefix(key, cmdPrefix) { + if strings.HasPrefix(key, cmdstatPrefix) { + rs.recordCmdStatsMetrics(ts, key[len(cmdstatPrefix):], val) + } else if strings.HasPrefix(key, latencyPrefix) { + rs.recordCmdLatencyMetrics(ts, key[len(latencyPrefix):], val) + } + } +} + +// recordCmdStatsMetrics records metrics for a particular Redis command. +// Only 'calls' and 'usec' are recorded at the moment. +// 'cmd' is the Redis command, 'val' is the values string (e.g. "calls=1685,usec=6032,usec_per_call=3.58,rejected_calls=0,failed_calls=0"). +func (rs *redisScraper) recordCmdStatsMetrics(ts pcommon.Timestamp, cmd, val string) { + parts := strings.Split(strings.TrimSpace(val), ",") + for _, element := range parts { + subParts := strings.Split(element, "=") + if len(subParts) == 1 { + continue + } + parsed, err := strconv.ParseInt(subParts[1], 10, 64) + if err != nil { // skip bad items + continue + } + if subParts[0] == "calls" { + rs.mb.RecordRedisCmdCallsDataPoint(ts, parsed, cmd) + } else if subParts[0] == "usec" { + rs.mb.RecordRedisCmdUsecDataPoint(ts, parsed, cmd) + } + } +} - - cmd := key[len(cmdPrefix):] - parts := strings.Split(strings.TrimSpace(val), ",") - for _, element := range parts { - subParts := strings.Split(element, "=") - if len(subParts) == 1 { - continue - } - parsed, err := strconv.ParseInt(subParts[1], 10, 64) - if err != nil { // skip bad items - continue - } - if subParts[0] == "calls" { - rs.mb.RecordRedisCmdCallsDataPoint(ts, parsed, cmd) - } else if subParts[0] == "usec" { - rs.mb.RecordRedisCmdUsecDataPoint(ts, parsed, cmd) - } +// recordCmdLatencyMetrics records latency metrics of a particular Redis command. +// 'cmd' is the Redis command, 'val' is the values string (e.g. "p50=1.003,p99=1.003,p99.9=1.003"). +func (rs *redisScraper) recordCmdLatencyMetrics(ts pcommon.Timestamp, cmd, val string) { + latencies, err := parseLatencyStats(val) + if err != nil { + return + } + + for percentile, latency := range latencies { + if percentileAttr, ok := metadata.MapAttributePercentile[percentile]; ok { + rs.mb.RecordRedisCmdLatencyDataPoint(ts, latency, cmd, percentileAttr) } } } diff --git a/receiver/splunkenterprisereceiver/README.md b/receiver/splunkenterprisereceiver/README.md new file mode 100644 index 000000000000..15ab877aef4e --- /dev/null +++ b/receiver/splunkenterprisereceiver/README.md @@ -0,0 +1,5 @@ +# Splunk Enterprise Receiver +--- + +The Splunk Enterprise Receiver is a pull-based receiver that collects key performance metrics (KPIs) describing the operational status of a user's Splunk Enterprise deployment and adds them +to their OpenTelemetry pipeline.
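For orientation, a minimal configuration sketch for the new receiver follows, using the settings introduced by this change (`endpoint` from the embedded HTTP client settings, `username`, `password`, `max_search_wait_time`, and the standard scraper `collection_interval`); the endpoint and credential values are illustrative only:

```yaml
receivers:
  splunkenterprise:
    # Splunk Enterprise REST API endpoint (illustrative value)
    endpoint: "https://localhost:8089"
    # account with permission to use the REST API (illustrative values)
    username: "admin"
    password: "changeme"
    # defaults set by the factory in this change
    collection_interval: 10m
    max_search_wait_time: 60s
```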
diff --git a/receiver/splunkenterprisereceiver/client.go b/receiver/splunkenterprisereceiver/client.go new file mode 100644 index 000000000000..46e4713a14f3 --- /dev/null +++ b/receiver/splunkenterprisereceiver/client.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" +) + +type splunkEntClient struct { + endpoint *url.URL + client *http.Client + basicAuth string +} + +func newSplunkEntClient(cfg *Config) splunkEntClient { + // configure a transport that skips TLS certificate verification + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + + client := &http.Client{Transport: tr} + + endpoint, _ := url.Parse(cfg.Endpoint) + + // build and encode our auth string. Do this work once to avoid rebuilding the + // auth header every time we make a new request + authString := fmt.Sprintf("%s:%s", cfg.Username, cfg.Password) + auth64 := base64.StdEncoding.EncodeToString([]byte(authString)) + basicAuth := fmt.Sprintf("Basic %s", auth64) + + return splunkEntClient{ + client: client, + endpoint: endpoint, + basicAuth: basicAuth, + } +} + +// For running ad hoc searches only +func (c *splunkEntClient) createRequest(ctx context.Context, sr *searchResponse) (*http.Request, error) { + // Running searches via Splunk's REST API is a two step process: first submit the search job to run; + // this returns a jobid which is then used in the second step to retrieve the search results + if sr.Jobid == nil { + path := "/services/search/jobs/" + url, _ := url.JoinPath(c.endpoint.String(), path) + + // reader for the request body (the search string) + data := strings.NewReader(sr.search) + + // build the request, ready to be run by makeRequest + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, data) + if err != nil { + return nil, err + } + + // Required headers + req.Header.Add("Authorization", c.basicAuth) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + return req, nil + } + path := fmt.Sprintf("/services/search/jobs/%s/results", *sr.Jobid) + url, _ := url.JoinPath(c.endpoint.String(), path) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + // Required headers + req.Header.Add("Authorization", c.basicAuth) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + return req, nil +} + +func (c *splunkEntClient) createAPIRequest(ctx context.Context, apiEndpoint string) (*http.Request, error) { + url := c.endpoint.String() + apiEndpoint + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + // Required headers + req.Header.Add("Authorization", c.basicAuth) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + return req, nil +} + +// Construct and perform a request to the API.
Returns the raw *http.Response so the +// caller can process it +func (c *splunkEntClient) makeRequest(req *http.Request) (*http.Response, error) { + res, err := c.client.Do(req) + if err != nil { + return nil, err + } + + return res, nil +} diff --git a/receiver/splunkenterprisereceiver/client_test.go b/receiver/splunkenterprisereceiver/client_test.go new file mode 100644 index 000000000000..be60bc608c8d --- /dev/null +++ b/receiver/splunkenterprisereceiver/client_test.go @@ -0,0 +1,151 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/receiver/scraperhelper" +) + +func TestClientCreation(t *testing.T) { + // create a client from an example config + client := newSplunkEntClient(&Config{ + Username: "admin", + Password: "securityFirst", + MaxSearchWaitTime: 11 * time.Second, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Second, + InitialDelay: 1 * time.Second, + }, + }) + + testEndpoint, _ := url.Parse("https://localhost:8089") + + authString := fmt.Sprintf("%s:%s", "admin", "securityFirst") + auth64 := base64.StdEncoding.EncodeToString([]byte(authString)) + testBasicAuth := fmt.Sprintf("Basic %s", auth64) + + require.Equal(t, client.endpoint, testEndpoint) + require.Equal(t, client.basicAuth, testBasicAuth) +} + +// test functionality of createRequest which is used for building metrics out of +// ad-hoc searches +func TestClientCreateRequest(t *testing.T) { + // create a client from an example config + client := newSplunkEntClient(&Config{ + Username: "admin", + Password: "securityFirst", + MaxSearchWaitTime: 11 * time.Second, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Second, + InitialDelay: 1 * time.Second, + }, + }) + + testJobID := "123" + + tests := []struct { + desc string + sr *searchResponse + client splunkEntClient + expected *http.Request + }{ + { + desc: "First req, no jobid", + sr: &searchResponse{ + search: "example search", + }, + client: client, + expected: func() *http.Request { + method := "POST" + path := "/services/search/jobs/" + testEndpoint, _ := url.Parse("https://localhost:8089") + url, _ := url.JoinPath(testEndpoint.String(), path) + data := strings.NewReader("example search") + req, _ := http.NewRequest(method, url, data) + req.Header.Add("Authorization", client.basicAuth) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + return req + }(), + }, + { + desc: "Second req, jobID detected", + sr: &searchResponse{ + search: "example search", + Jobid: &testJobID, + }, + client: client, + expected: func() *http.Request { + method := "GET" + path := fmt.Sprintf("/services/search/jobs/%s/results", testJobID) + testEndpoint, _ := url.Parse("https://localhost:8089") + url, _ := url.JoinPath(testEndpoint.String(), path) + req, _ := http.NewRequest(method, url, nil) + req.Header.Add("Authorization", client.basicAuth) + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + return req + }(), + }, + } + + ctx := context.Background() + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + req, err := test.client.createRequest(ctx, test.sr) + require.NoError(t, err) + // have to test specific parts since individual fields are pointers + require.Equal(t, test.expected.URL, req.URL) + require.Equal(t, test.expected.Method, req.Method) + require.Equal(t, test.expected.Header, req.Header) + require.Equal(t, test.expected.Body, req.Body) + }) + } +} + +// createAPIRequest creates a request for API calls, i.e. to the introspection endpoint +func TestAPIRequestCreate(t *testing.T) { + client := newSplunkEntClient(&Config{ + Username: "admin", + Password: "securityFirst", + MaxSearchWaitTime: 11 * time.Second, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Second, + InitialDelay: 1 * time.Second, + }, + }) + + ctx := context.Background() + req, err := client.createAPIRequest(ctx, "/test/endpoint") + require.NoError(t, err) + + expectedURL := client.endpoint.String() + "/test/endpoint" + expected, _ := http.NewRequest(http.MethodGet, expectedURL, nil) + expected.Header.Add("Authorization", client.basicAuth) + expected.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + require.Equal(t, expected.URL, req.URL) + require.Equal(t, expected.Method, req.Method) + require.Equal(t, expected.Header, req.Header) + require.Equal(t, expected.Body, req.Body) +} diff --git a/receiver/splunkenterprisereceiver/config.go b/receiver/splunkenterprisereceiver/config.go index dd66eea50ac0..48546777b71d 100644 --- a/receiver/splunkenterprisereceiver/config.go +++ b/receiver/splunkenterprisereceiver/config.go @@ -2,3 +2,65 @@ // SPDX-License-Identifier: Apache-2.0 package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "errors" + "net/url" + "strings" + "time" + + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/receiver/scraperhelper" + "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +var ( + errBadOrMissingEndpoint = errors.New("Missing a valid endpoint") + errMissingUsername = errors.New("Missing valid username") + errMissingPassword = errors.New("Missing valid password") + errBadScheme = errors.New("Endpoint scheme must be either http or https") +) + +type Config struct { + confighttp.HTTPClientSettings `mapstructure:",squash"` + scraperhelper.ScraperControllerSettings `mapstructure:",squash"` + metadata.MetricsBuilderConfig `mapstructure:",squash"` + // Username and password associated with an account with + // permission to access the Splunk deployment's REST API + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + // default is 60s + MaxSearchWaitTime time.Duration `mapstructure:"max_search_wait_time"` +} + +func (cfg *Config) Validate() (errors error) { + var targetURL *url.URL + + if cfg.Endpoint == "" { + errors = multierr.Append(errors, errBadOrMissingEndpoint) + } else { + // validate that the endpoint URL supplied by the user is at least + // structurally valid + var err error + targetURL, err = url.Parse(cfg.Endpoint) + if err != nil { + errors = multierr.Append(errors, errBadOrMissingEndpoint) + }
+ + if !strings.HasPrefix(targetURL.Scheme, "http") { + errors = multierr.Append(errors, errBadScheme) + } + } + + if cfg.Username == "" { + errors = multierr.Append(errors, errMissingUsername) + } + + if cfg.Password == "" { + errors = multierr.Append(errors, errMissingPassword) + } + + return errors +} diff --git a/receiver/splunkenterprisereceiver/config_test.go b/receiver/splunkenterprisereceiver/config_test.go index bcc05019db8f..264f9b745f32 100644 --- a/receiver/splunkenterprisereceiver/config_test.go +++ b/receiver/splunkenterprisereceiver/config_test.go @@ -1,4 +1,131 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package splunkenterprisereceiver +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "path/filepath" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/confmap/confmaptest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +func TestValidateConfig(t *testing.T) { + t.Parallel() + + var multipleErrors error + + multipleErrors = multierr.Combine(multipleErrors, errBadOrMissingEndpoint, errMissingUsername, errMissingPassword) + + tests := []struct { + desc string + expect error + conf Config + }{ + { + desc: "Missing password", + expect: errMissingPassword, + conf: Config{ + Username: "admin", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + }, + }, + { + desc: "Missing username", + expect: errMissingUsername, + conf: Config{ + Password: "securityFirst", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + }, + }, + { + desc: "Bad scheme (none http/s)", + expect: errBadScheme, + conf: Config{ + Password: "securityFirst", + Username: "admin", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "localhost:8089", + }, + }, + }, + { + desc: "Missing endpoint", + expect: errBadOrMissingEndpoint, + conf: Config{ + Username: "admin", + Password: "securityFirst", + }, + }, + { + desc: "Missing multiple", + expect: multipleErrors, + conf: Config{}, + }, + } + + for i := range tests { + test := tests[i] + + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + err := test.conf.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), test.expect.Error()) + }) + } +} + +func TestLoadConfig(t *testing.T) { + t.Parallel() + + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + id := component.NewID(metadata.Type) + cmSub, err := cm.Sub(id.String()) + require.NoError(t, err) + + testmetrics := metadata.DefaultMetricsBuilderConfig() + testmetrics.Metrics.SplunkLicenseIndexUsage.Enabled = true + testmetrics.Metrics.SplunkIndexerThroughput.Enabled = false + + expected := &Config{ + Username: "admin", + Password: "securityFirst", + MaxSearchWaitTime: 11 * time.Second, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "https://localhost:8089", + }, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Second, + InitialDelay: 1 * time.Second, + }, + 
MetricsBuilderConfig: testmetrics, + } + + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + + require.NoError(t, component.UnmarshalConfig(cmSub, cfg)) + require.NoError(t, component.ValidateConfig(cfg)) + + diff := cmp.Diff(expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{})) + if diff != "" { + t.Errorf("config mismatch (-expected / +actual)\n%s", diff) + } +} diff --git a/receiver/splunkenterprisereceiver/doc.go b/receiver/splunkenterprisereceiver/doc.go new file mode 100644 index 000000000000..3ca1d952aaee --- /dev/null +++ b/receiver/splunkenterprisereceiver/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" diff --git a/receiver/splunkenterprisereceiver/documentation.md b/receiver/splunkenterprisereceiver/documentation.md new file mode 100644 index 000000000000..7ba887a71d62 --- /dev/null +++ b/receiver/splunkenterprisereceiver/documentation.md @@ -0,0 +1,41 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# splunkenterprise + +## Default Metrics + +The following metrics are emitted by default. Each of them can be disabled by applying the following configuration: + +```yaml +metrics: + : + enabled: false +``` + +### splunk.indexer.throughput + +Gauge tracking average bytes per second throughput of indexer + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By/s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.indexer.status | The status message reported for a specific object | Any Str | + +### splunk.license.index.usage + +Gauge tracking the indexed license usage per index + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| splunk.index.name | The name of the index reporting a specific KPI | Any Str | diff --git a/receiver/splunkenterprisereceiver/factory.go b/receiver/splunkenterprisereceiver/factory.go index dd66eea50ac0..c956e99b9508 100644 --- a/receiver/splunkenterprisereceiver/factory.go +++ b/receiver/splunkenterprisereceiver/factory.go @@ -2,3 +2,63 @@ // SPDX-License-Identifier: Apache-2.0 package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +const ( + defaultInterval = 10 * time.Minute + defaultMaxSearchWaitTime = 60 * time.Second +) + +func createDefaultConfig() component.Config { + scfg := scraperhelper.NewDefaultScraperControllerSettings(metadata.Type) + scfg.CollectionInterval = defaultInterval + + return &Config{ + ScraperControllerSettings: scfg, + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), + MaxSearchWaitTime: defaultMaxSearchWaitTime, + } +} + +func NewFactory() receiver.Factory { + return receiver.NewFactory( + metadata.Type, + createDefaultConfig, + receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), + ) +} + 
+func createMetricsReceiver( + _ context.Context, + params receiver.CreateSettings, + baseCfg component.Config, + consumer consumer.Metrics, +) (receiver.Metrics, error) { + cfg := baseCfg.(*Config) + splunkScraper := newSplunkMetricsScraper(params, cfg) + + scraper, err := scraperhelper.NewScraper(metadata.Type, + splunkScraper.scrape, + scraperhelper.WithStart(splunkScraper.start)) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver( + &cfg.ScraperControllerSettings, + params, + consumer, + scraperhelper.AddScraper(scraper), + ) +} diff --git a/receiver/splunkenterprisereceiver/factory_test.go b/receiver/splunkenterprisereceiver/factory_test.go index bcc05019db8f..4df2a00d471b 100644 --- a/receiver/splunkenterprisereceiver/factory_test.go +++ b/receiver/splunkenterprisereceiver/factory_test.go @@ -1,4 +1,82 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package splunkenterprisereceiver +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +func TestFactoryCreate(t *testing.T) { + factory := NewFactory() + require.EqualValues(t, "splunkenterprise", factory.Type()) +} + +func TestDefaultConfig(t *testing.T) { + expectedConf := &Config{ + MaxSearchWaitTime: 60 * time.Second, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Minute, + InitialDelay: 1 * time.Second, + }, + MetricsBuilderConfig: metadata.DefaultMetricsBuilderConfig(), + } + + testConf := createDefaultConfig().(*Config) + + require.Equal(t, expectedConf, testConf) +} + +func TestCreateMetricsReceiver(t *testing.T) { + tests := []struct { + desc string + run func(t *testing.T) + }{ + { + desc: "Defaults with valid config", + run: func(t *testing.T) { + t.Parallel() + + cfg := createDefaultConfig().(*Config) + + _, err := createMetricsReceiver( + context.Background(), + receivertest.NewNopCreateSettings(), + cfg, + consumertest.NewNop(), + ) + + require.NoError(t, err, "failed to create metrics receiver with valid inputs") + }, + }, + { + desc: "Missing consumer", + run: func(t *testing.T) { + t.Parallel() + + cfg := createDefaultConfig().(*Config) + + _, err := createMetricsReceiver( + context.Background(), + receivertest.NewNopCreateSettings(), + cfg, + nil, + ) + + require.Error(t, err, "created metrics receiver without consumer") + }, + }, + } + for _, test := range tests { + t.Run(test.desc, test.run) + } +} diff --git a/receiver/splunkenterprisereceiver/go.mod b/receiver/splunkenterprisereceiver/go.mod index 1329aa578e40..d01a9fc72e9b 100644 --- a/receiver/splunkenterprisereceiver/go.mod +++ b/receiver/splunkenterprisereceiver/go.mod @@ -2,29 +2,73 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunk go 1.20 -require go.opentelemetry.io/collector/component v0.85.0 +require ( + github.com/google/go-cmp v0.5.9 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.85.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.85.0 + github.com/stretchr/testify v1.8.4 + 
go.opentelemetry.io/collector/component v0.85.0 + go.opentelemetry.io/collector/config/confighttp v0.85.0 + go.opentelemetry.io/collector/confmap v0.85.0 + go.opentelemetry.io/collector/consumer v0.85.0 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 + go.opentelemetry.io/collector/receiver v0.85.0 + go.uber.org/multierr v1.11.0 + go.uber.org/zap v1.25.0 +) require ( + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.16.7 // indirect github.com/knadh/koanf v1.5.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.85.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/cors v1.10.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/collector v0.85.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.85.0 // indirect + go.opentelemetry.io/collector/config/configcompression v0.85.0 // indirect + go.opentelemetry.io/collector/config/configopaque v0.85.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.85.0 // indirect - go.opentelemetry.io/collector/confmap v0.85.0 // indirect + go.opentelemetry.io/collector/config/configtls v0.85.0 // indirect + go.opentelemetry.io/collector/config/internal v0.85.0 // indirect + go.opentelemetry.io/collector/exporter v0.85.0 // indirect + go.opentelemetry.io/collector/extension v0.85.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.85.0 // indirect go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 // indirect - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 // indirect + go.opentelemetry.io/collector/processor v0.85.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 // indirect go.opentelemetry.io/otel v1.17.0 // indirect go.opentelemetry.io/otel/metric v1.17.0 // indirect + go.opentelemetry.io/otel/sdk v1.17.0 // indirect + go.opentelemetry.io/otel/sdk/metric v0.40.0 // indirect go.opentelemetry.io/otel/trace v1.17.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.25.0 // indirect golang.org/x/net v0.15.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc v1.58.0 // indirect google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil + +replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal diff 
--git a/receiver/splunkenterprisereceiver/go.sum b/receiver/splunkenterprisereceiver/go.sum index d82543bdcfce..f956bd489793 100644 --- a/receiver/splunkenterprisereceiver/go.sum +++ b/receiver/splunkenterprisereceiver/go.sum @@ -1,5 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -24,10 +25,13 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -45,17 +49,26 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -63,6 +76,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -81,17 +96,21 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= @@ -137,10 +156,14 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= @@ -150,8 +173,10 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -161,6 +186,7 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -181,9 +207,12 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mitchellh/reflectwalk v1.0.2 
h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= @@ -203,19 +232,27 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/cors v1.10.0 
h1:62NOS1h+r8p1mW6FM0FSB0exioXLhd/sh15KpjWBZ+8= +github.com/rs/cors v1.10.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= @@ -226,32 +263,73 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.85.0 h1:I037iYQ2FlAOKmaesT5Hgy7VazJO5oO4rcHSgc1+K8A= +go.opentelemetry.io/collector v0.85.0/go.mod h1:jcETa0UJmwkDSyhkOTwQi8rgie1M3TjsIO98KeGM2vk= go.opentelemetry.io/collector/component v0.85.0 h1:RdeUDdX3prvkf0PFFbhMjWLsYUJdxy/d0oovOuObRDs= go.opentelemetry.io/collector/component v0.85.0/go.mod h1:C3CWpjYa+k7Vjkqes/8abJ/fkCn6FlR1sNkW4QPd+kI= +go.opentelemetry.io/collector/config/configauth v0.85.0 h1:KJquqyQ/Am3wgyOEhLHAYmLix1Mow7zUORNKitUv6GQ= +go.opentelemetry.io/collector/config/configauth v0.85.0/go.mod h1:+NTOnvvrYtuLUu5e0dM9luKjeyzHf+DPK9MEP/S24Vk= +go.opentelemetry.io/collector/config/configcompression v0.85.0 h1:QWqDla4mE+pDV+97vnA8OpBxX4VJxllsi1DSMN2M0mo= +go.opentelemetry.io/collector/config/configcompression v0.85.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= +go.opentelemetry.io/collector/config/confighttp v0.85.0 h1:CwY8qLcMdRshKCPzdZX6UnH5QU4YwuXtLNt/E6if+LY= +go.opentelemetry.io/collector/config/confighttp 
v0.85.0/go.mod h1:T+UpZ+VwkayTaFOAANa4nkUx1P3YlYdR6gx6mSoEwsU= +go.opentelemetry.io/collector/config/configopaque v0.85.0 h1:FdQCf88Vq2yX7kiRswN00t/oGY+BbRCaAd0paG0if1A= +go.opentelemetry.io/collector/config/configopaque v0.85.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= go.opentelemetry.io/collector/config/configtelemetry v0.85.0 h1:hxKBwHEK4enl4YKtdZCq2rxxIKHrccoChoZlVgG8vbI= go.opentelemetry.io/collector/config/configtelemetry v0.85.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= +go.opentelemetry.io/collector/config/configtls v0.85.0 h1:oYXJ5Je0218tXyJef1ri98HDEORPdToO4rM1C7D+1YI= +go.opentelemetry.io/collector/config/configtls v0.85.0/go.mod h1:IZJrxrpGoNb5W8U1ihe2lLOp0KdtBYPC6z2aoaBrRPo= +go.opentelemetry.io/collector/config/internal v0.85.0 h1:Pjr2XMqSLCHXsmzZrW7zVZf1VBcal84tYNDoEsu8loA= +go.opentelemetry.io/collector/config/internal v0.85.0/go.mod h1:XN8Y+Vhq/RqxGry7CGKwhMXJptUrmWTHdC6ZeI+Uy9E= go.opentelemetry.io/collector/confmap v0.85.0 h1:xyshTMElkpCJRCbg9OyGL41f7ToCr+PRBJKuAbGR17I= go.opentelemetry.io/collector/confmap v0.85.0/go.mod h1:/SNHqYkLagF0TjBjQyCy2Gt3ZF6hTK8VKbaan/ZHuJs= +go.opentelemetry.io/collector/consumer v0.85.0 h1:YG1yQHhuLWLNANyRIqM6QasVFKanDI/p51mcjXV8+k8= +go.opentelemetry.io/collector/consumer v0.85.0/go.mod h1:Ysc7XgJDZuNkyyvQVKz+/e6R3Z7haiOcFNsjkgmBSHY= +go.opentelemetry.io/collector/exporter v0.85.0 h1:06RnmrcEiBG3xhhGcGNT+hEk/j/4XtHavlMIxZcJHQE= +go.opentelemetry.io/collector/exporter v0.85.0/go.mod h1:4RNnq8xd6OBmc10XUF/K+NLUMfRiXrus+XRpAZeoLuA= +go.opentelemetry.io/collector/extension v0.85.0 h1:izKozTZ4vOnYdMGhu0ROV69hXCptH6DI8JVkN/ZOEJQ= +go.opentelemetry.io/collector/extension v0.85.0/go.mod h1:l5zRPdhtmBjRmPeOhxJsZ/GRIimq4/HIAq/Rgf70W+o= +go.opentelemetry.io/collector/extension/auth v0.85.0 h1:mMZfNUCeWaXnQY85gepFA9uhzsEf0AAnAE4/Oblx/Wc= +go.opentelemetry.io/collector/extension/auth v0.85.0/go.mod h1:TBe1YtL27cc7liJFpamMhkmMVOEzR5/ccavvQFlLkAQ= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014 h1:C9o0mbP0MyygqFnKueVQK/v9jef6zvuttmTGlKaqhgw= go.opentelemetry.io/collector/featuregate v1.0.0-rcv0014/go.mod h1:0mE3mDLmUrOXVoNsuvj+7dV14h/9HFl/Fy9YTLoLObo= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= +go.opentelemetry.io/collector/processor v0.85.0 h1:vN3A+E/rm6nCkx9iM+kjlMzXAtE0a2vP1JhF9oqLjZQ= +go.opentelemetry.io/collector/processor v0.85.0/go.mod h1:jp9vOjg9dS13zUy1ma6C4u4hGTdbp2GKb9xp5bX2krc= +go.opentelemetry.io/collector/receiver v0.85.0 h1:ecDJicAq1mbQcsEkzo1q+6Y2DP8plK51y4p4w7ZhNmA= +go.opentelemetry.io/collector/receiver v0.85.0/go.mod h1:6pmub7FpyQIAcCLmijUtelzWD9Jj8Csno7W+3FBQFJo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 h1:HKORGpiOY0R0nAPtKx/ub8/7XoHhRooP8yNRkuPfelI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0/go.mod h1:e+y1M74SYXo/FcIx3UATwth2+5dDkM8dBi7eXg1tbw8= go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel/exporters/prometheus v0.40.1-0.20230831181707-02616a25c68e h1:eh19+pVw4g/V7QM/8iNDk3M5cNy3mOVzggEgdlt9jkY= go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= +go.opentelemetry.io/otel/sdk v1.17.0 
h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= +go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= +go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= +go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -290,6 +368,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= @@ -337,6 +416,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -381,6 +461,7 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= @@ -401,6 +482,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= 
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -409,6 +491,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go new file mode 100644 index 000000000000..9b41190aab88 --- /dev/null +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config.go @@ -0,0 +1,52 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import "go.opentelemetry.io/collector/confmap" + +// MetricConfig provides common config for a particular metric. +type MetricConfig struct { + Enabled bool `mapstructure:"enabled"` + + enabledSetByUser bool +} + +func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms, confmap.WithErrorUnused()) + if err != nil { + return err + } + ms.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// MetricsConfig provides config for splunkenterprise metrics. +type MetricsConfig struct { + SplunkIndexerThroughput MetricConfig `mapstructure:"splunk.indexer.throughput"` + SplunkLicenseIndexUsage MetricConfig `mapstructure:"splunk.license.index.usage"` +} + +func DefaultMetricsConfig() MetricsConfig { + return MetricsConfig{ + SplunkIndexerThroughput: MetricConfig{ + Enabled: true, + }, + SplunkLicenseIndexUsage: MetricConfig{ + Enabled: true, + }, + } +} + +// MetricsBuilderConfig is a configuration for splunkenterprise metrics builder. +type MetricsBuilderConfig struct { + Metrics MetricsConfig `mapstructure:"metrics"` +} + +func DefaultMetricsBuilderConfig() MetricsBuilderConfig { + return MetricsBuilderConfig{ + Metrics: DefaultMetricsConfig(), + } +} diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go new file mode 100644 index 000000000000..4ee23fa23b4c --- /dev/null +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_config_test.go @@ -0,0 +1,62 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap/confmaptest" +) + +func TestMetricsBuilderConfig(t *testing.T) { + tests := []struct { + name string + want MetricsBuilderConfig + }{ + { + name: "default", + want: DefaultMetricsBuilderConfig(), + }, + { + name: "all_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + SplunkIndexerThroughput: MetricConfig{Enabled: true}, + SplunkLicenseIndexUsage: MetricConfig{Enabled: true}, + }, + }, + }, + { + name: "none_set", + want: MetricsBuilderConfig{ + Metrics: MetricsConfig{ + SplunkIndexerThroughput: MetricConfig{Enabled: false}, + SplunkLicenseIndexUsage: MetricConfig{Enabled: false}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadMetricsBuilderConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + cfg := DefaultMetricsBuilderConfig() + require.NoError(t, component.UnmarshalConfig(sub, &cfg)) + return cfg +} diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go new file mode 100644 index 000000000000..a7e2738fa2bb --- /dev/null +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics.go @@ -0,0 +1,241 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" +) + +type metricSplunkIndexerThroughput struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.indexer.throughput metric with initial data. +func (m *metricSplunkIndexerThroughput) init() { + m.data.SetName("splunk.indexer.throughput") + m.data.SetDescription("Gauge tracking average bytes per second throughput of indexer") + m.data.SetUnit("By/s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkIndexerThroughput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, splunkIndexerStatusAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("splunk.indexer.status", splunkIndexerStatusAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkIndexerThroughput) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSplunkIndexerThroughput) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkIndexerThroughput(cfg MetricConfig) metricSplunkIndexerThroughput { + m := metricSplunkIndexerThroughput{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSplunkLicenseIndexUsage struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills splunk.license.index.usage metric with initial data. +func (m *metricSplunkLicenseIndexUsage) init() { + m.data.SetName("splunk.license.index.usage") + m.data.SetDescription("Gauge tracking the indexed license usage per index") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSplunkLicenseIndexUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("splunk.index.name", splunkIndexNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSplunkLicenseIndexUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSplunkLicenseIndexUsage) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSplunkLicenseIndexUsage(cfg MetricConfig) metricSplunkLicenseIndexUsage { + m := metricSplunkLicenseIndexUsage{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user config. +type MetricsBuilder struct { + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. + metricSplunkIndexerThroughput metricSplunkIndexerThroughput + metricSplunkLicenseIndexUsage metricSplunkLicenseIndexUsage +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricSplunkIndexerThroughput: newMetricSplunkIndexerThroughput(mbc.Metrics.SplunkIndexerThroughput), + metricSplunkLicenseIndexUsage: newMetricSplunkLicenseIndexUsage(mbc.Metrics.SplunkLicenseIndexUsage), + } + for _, op := range options { + op(mb) + } + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } +} + +// ResourceMetricsOption applies changes to provided resource metrics. +type ResourceMetricsOption func(pmetric.ResourceMetrics) + +// WithResource sets the provided resource on the emitted ResourceMetrics. +// It's recommended to use ResourceBuilder to create the resource. +func WithResource(res pcommon.Resource) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + res.CopyTo(rm.Resource()) + } +} + +// WithStartTimeOverride overrides start time for all the resource metrics data points. +// This option should be only used if different start time has to be set on metrics coming from different resources. +func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + var dps pmetric.NumberDataPointSlice + metrics := rm.ScopeMetrics().At(0).Metrics() + for i := 0; i < metrics.Len(); i++ { + switch metrics.At(i).Type() { + case pmetric.MetricTypeGauge: + dps = metrics.At(i).Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = metrics.At(i).Sum().DataPoints() + } + for j := 0; j < dps.Len(); j++ { + dps.At(j).SetStartTimestamp(start) + } + } + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. +// Resource attributes should be provided as ResourceMetricsOption arguments. +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/splunkenterprisereceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSplunkIndexerThroughput.emit(ils.Metrics()) + mb.metricSplunkLicenseIndexUsage.emit(ils.Metrics()) + + for _, op := range rmo { + op(rm) + } + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user config, e.g. 
delta or cumulative. +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) + metrics := mb.metricsBuffer + mb.metricsBuffer = pmetric.NewMetrics() + return metrics +} + +// RecordSplunkIndexerThroughputDataPoint adds a data point to splunk.indexer.throughput metric. +func (mb *MetricsBuilder) RecordSplunkIndexerThroughputDataPoint(ts pcommon.Timestamp, val float64, splunkIndexerStatusAttributeValue string) { + mb.metricSplunkIndexerThroughput.recordDataPoint(mb.startTime, ts, val, splunkIndexerStatusAttributeValue) +} + +// RecordSplunkLicenseIndexUsageDataPoint adds a data point to splunk.license.index.usage metric. +func (mb *MetricsBuilder) RecordSplunkLicenseIndexUsageDataPoint(ts pcommon.Timestamp, val int64, splunkIndexNameAttributeValue string) { + mb.metricSplunkLicenseIndexUsage.recordDataPoint(mb.startTime, ts, val, splunkIndexNameAttributeValue) +} + +// Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} diff --git a/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go new file mode 100644 index 000000000000..81aca74c5a07 --- /dev/null +++ b/receiver/splunkenterprisereceiver/internal/metadata/generated_metrics_test.go @@ -0,0 +1,121 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +type testConfigCollection int + +const ( + testSetDefault testConfigCollection = iota + testSetAll + testSetNone +) + +func TestMetricsBuilder(t *testing.T) { + tests := []struct { + name string + configSet testConfigCollection + }{ + { + name: "default", + configSet: testSetDefault, + }, + { + name: "all_set", + configSet: testSetAll, + }, + { + name: "none_set", + configSet: testSetNone, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + observedZapCore, observedLogs := observer.New(zap.WarnLevel) + settings := receivertest.NewNopCreateSettings() + settings.Logger = zap.New(observedZapCore) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + + expectedWarnings := 0 + assert.Equal(t, expectedWarnings, observedLogs.Len()) + + defaultMetricsCount := 0 + allMetricsCount := 0 + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkIndexerThroughputDataPoint(ts, 1, "splunk.indexer.status-val") + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordSplunkLicenseIndexUsageDataPoint(ts, 1, "splunk.index.name-val") + + res := pcommon.NewResource() + metrics := mb.Emit(WithResource(res)) + + if test.configSet == testSetNone { + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) + return + } + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + assert.Equal(t, res, rm.Resource()) + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + 
ms := rm.ScopeMetrics().At(0).Metrics() + if test.configSet == testSetDefault { + assert.Equal(t, defaultMetricsCount, ms.Len()) + } + if test.configSet == testSetAll { + assert.Equal(t, allMetricsCount, ms.Len()) + } + validatedMetrics := make(map[string]bool) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "splunk.indexer.throughput": + assert.False(t, validatedMetrics["splunk.indexer.throughput"], "Found a duplicate in the metrics slice: splunk.indexer.throughput") + validatedMetrics["splunk.indexer.throughput"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking average bytes per second throughput of indexer", ms.At(i).Description()) + assert.Equal(t, "By/s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("splunk.indexer.status") + assert.True(t, ok) + assert.EqualValues(t, "splunk.indexer.status-val", attrVal.Str()) + case "splunk.license.index.usage": + assert.False(t, validatedMetrics["splunk.license.index.usage"], "Found a duplicate in the metrics slice: splunk.license.index.usage") + validatedMetrics["splunk.license.index.usage"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Gauge tracking the indexed license usage per index", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("splunk.index.name") + assert.True(t, ok) + assert.EqualValues(t, "splunk.index.name-val", attrVal.Str()) + } + } + }) + } +} diff --git a/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml new file mode 100644 index 000000000000..3b87daccd3e2 --- /dev/null +++ b/receiver/splunkenterprisereceiver/internal/metadata/testdata/config.yaml @@ -0,0 +1,13 @@ +default: +all_set: + metrics: + splunk.indexer.throughput: + enabled: true + splunk.license.index.usage: + enabled: true +none_set: + metrics: + splunk.indexer.throughput: + enabled: false + splunk.license.index.usage: + enabled: false diff --git a/receiver/splunkenterprisereceiver/metadata.yaml b/receiver/splunkenterprisereceiver/metadata.yaml index eb4493825fa8..16f2dfb3675a 100644 --- a/receiver/splunkenterprisereceiver/metadata.yaml +++ b/receiver/splunkenterprisereceiver/metadata.yaml @@ -6,4 +6,30 @@ status: development: [metrics] distributions: codeowners: - active: [shalper2, MovieStoreGuy] \ No newline at end of file + active: [shalper2, MovieStoreGuy] + +attributes: + splunk.index.name: + description: The name of the index reporting a specific KPI + type: string + splunk.indexer.status: + description: The status message reported for a specific object + type: string + +metrics: + splunk.license.index.usage: + enabled: true + description: Gauge tracking the indexed license usage per index + unit: By + gauge: + value_type: int + attributes: [splunk.index.name] + # 
'services/server/introspection/indexer' + splunk.indexer.throughput: + enabled: true + description: Gauge tracking average bytes per second throughput of indexer + unit: By/s + gauge: + value_type: double + # attribute `status` can be one of the following `normal`, `throttled`, `stopped` + attributes: [splunk.indexer.status] diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index dd66eea50ac0..90c2d1032898 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -2,3 +2,196 @@ // SPDX-License-Identifier: Apache-2.0 package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/scrapererror" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +var ( + errMaxSearchWaitTimeExceeded = errors.New("maximum search wait time exceeded for metric") +) + +type splunkScraper struct { + splunkClient *splunkEntClient + settings component.TelemetrySettings + conf *Config + mb *metadata.MetricsBuilder +} + +func newSplunkMetricsScraper(params receiver.CreateSettings, cfg *Config) splunkScraper { + return splunkScraper{ + settings: params.TelemetrySettings, + conf: cfg, + mb: metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, params), + } +} + +// Create a client instance and add it to the splunkScraper +func (s *splunkScraper) start(_ context.Context, _ component.Host) (err error) { + c := newSplunkEntClient(s.conf) + s.splunkClient = &c + return nil +} + +// The big one: Describes how all scraping tasks should be performed. Part of the scraper interface +func (s *splunkScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + errs := &scrapererror.ScrapeErrors{} + now := pcommon.NewTimestampFromTime(time.Now()) + + s.scrapeLicenseUsageByIndex(ctx, now, errs) + s.scrapeIndexThroughput(ctx, now, errs) + return s.mb.Emit(), errs.Combine() +} + +// Each metric has its own scrape function associated with it +func (s *splunkScraper) scrapeLicenseUsageByIndex(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + var sr searchResponse + // Because we have to utilize network resources for each KPI we should check that each metric + // is enabled before proceeding + if !s.conf.MetricsBuilderConfig.Metrics.SplunkLicenseIndexUsage.Enabled { + return + } + + sr = searchResponse{ + search: searchDict[`SplunkLicenseIndexUsageSearch`], + } + + var ( + req *http.Request + res *http.Response + err error + ) + + start := time.Now() + + for { + req, err = s.splunkClient.createRequest(ctx, &sr) + if err != nil { + errs.Add(err) + return + } + + res, err = s.splunkClient.makeRequest(req) + if err != nil { + errs.Add(err) + return + } + + // if it's a 204 the body will be empty because we are still waiting on search results + err = unmarshallSearchReq(res, &sr) + if err != nil { + errs.Add(err) + return + } + res.Body.Close() + + // if no errors and a 200 is returned the scrape was successful, return.
Note we must make sure that + // the 200 comes after the first request, which provides a jobId to retrieve results + if sr.Return == 200 && sr.Jobid != nil { + break + } + + if sr.Return == 204 { + time.Sleep(2 * time.Second) + } + + if time.Since(start) > s.conf.MaxSearchWaitTime { + errs.Add(errMaxSearchWaitTimeExceeded) + return + } + } + + // Record the results + var indexName string + for _, f := range sr.Fields { + switch fieldName := f.FieldName; fieldName { + case "indexname": + indexName = f.Value + continue + case "By": + v, err := strconv.ParseFloat(f.Value, 64) + if err != nil { + errs.Add(err) + continue + } + s.mb.RecordSplunkLicenseIndexUsageDataPoint(now, int64(v), indexName) + } + } +} + +// Helper function for unmarshaling search endpoint requests +func unmarshallSearchReq(res *http.Response, sr *searchResponse) error { + sr.Return = res.StatusCode + + if res.ContentLength == 0 { + return nil + } + + body, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("failed to read response: %w", err) + } + + err = xml.Unmarshal(body, &sr) + if err != nil { + return fmt.Errorf("failed to unmarshal response: %w", err) + } + + return nil +} + +// Scrape index throughput introspection endpoint +func (s *splunkScraper) scrapeIndexThroughput(ctx context.Context, now pcommon.Timestamp, errs *scrapererror.ScrapeErrors) { + var it indexThroughput + var ept string + + if !s.conf.MetricsBuilderConfig.Metrics.SplunkIndexerThroughput.Enabled { + return + } + + ept = apiDict[`SplunkIndexerThroughput`] + + req, err := s.splunkClient.createAPIRequest(ctx, ept) + if err != nil { + errs.Add(err) + return + } + + res, err := s.splunkClient.makeRequest(req) + if err != nil { + errs.Add(err) + return + } + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + if err != nil { + errs.Add(err) + return + } + + err = json.Unmarshal(body, &it) + if err != nil { + errs.Add(err) + } + + for _, entry := range it.Entries { + s.mb.RecordSplunkIndexerThroughputDataPoint(now, 1000*entry.Content.AvgKb, entry.Content.Status) + } +} diff --git a/receiver/splunkenterprisereceiver/scraper_test.go b/receiver/splunkenterprisereceiver/scraper_test.go index bcc05019db8f..12eadeee18f3 100644 --- a/receiver/splunkenterprisereceiver/scraper_test.go +++ b/receiver/splunkenterprisereceiver/scraper_test.go @@ -1,4 +1,85 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package splunkenterprisereceiver +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +import ( + "context" + "net/http" + "net/http/httptest" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/receiver/receivertest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver/internal/metadata" +) + +// handler function for mock server +func mockIndexerThroughput(w http.ResponseWriter, _ *http.Request) { + status := http.StatusOK + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + _, _ =
w.Write([]byte(`{"links":{},"origin":"https://somehost:8089/services/server/introspection/indexer","updated":"2023-07-31T21:41:07+00:00","generator":{"build":"82c987350fde","version":"9.0.1"},"entry":[{"name":"indexer","id":"https://34.213.134.166:8089/services/server/introspection/indexer/indexer","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/server/introspection/indexer/indexer","list":"/services/server/introspection/indexer/indexer","edit":"/services/server/introspection/indexer/indexer"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"average_KBps":25.579690815904478,"eai:acl":null,"reason":"","status":"normal"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`)) +} + +// mock server create +func createMockServer() *httptest.Server { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch strings.TrimSpace(r.URL.Path) { + case "/services/server/introspection/indexer": + mockIndexerThroughput(w, r) + default: + http.NotFoundHandler().ServeHTTP(w, r) + } + })) + + return ts +} + +func TestScraper(t *testing.T) { + ts := createMockServer() + defer ts.Close() + + // in the future add more metrics + metricsettings := metadata.MetricsBuilderConfig{} + metricsettings.Metrics.SplunkIndexerThroughput.Enabled = true + + cfg := &Config{ + Username: "admin", + Password: "securityFirst", + MaxSearchWaitTime: 11 * time.Second, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: ts.URL, + }, + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + CollectionInterval: 10 * time.Second, + InitialDelay: 1 * time.Second, + }, + MetricsBuilderConfig: metricsettings, + } + + require.NoError(t, cfg.Validate()) + + scraper := newSplunkMetricsScraper(receivertest.NewNopCreateSettings(), cfg) + client := newSplunkEntClient(cfg) + scraper.splunkClient = &client + + actualMetrics, err := scraper.scrape(context.Background()) + require.NoError(t, err) + + expectedFile := filepath.Join("testdata", "scraper", "expected.yaml") + // golden.WriteMetrics(t, expectedFile, actualMetrics) // run tests with this line whenever metrics are modified + + expectedMetrics, err := golden.ReadMetrics(expectedFile) + require.NoError(t, err) + + require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics, pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) +} diff --git a/receiver/splunkenterprisereceiver/search_result.go b/receiver/splunkenterprisereceiver/search_result.go new file mode 100644 index 000000000000..74761bbbd667 --- /dev/null +++ b/receiver/splunkenterprisereceiver/search_result.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package splunkenterprisereceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/splunkenterprisereceiver" + +// metric name and its associated search as a key value pair +var searchDict = map[string]string{ + `SplunkLicenseIndexUsageSearch`: `search=search index=_internal source=*license_usage.log type="Usage"| fields idx, b| eval indexname = if(len(idx)=0 OR isnull(idx),"(UNKNOWN)",idx)| stats sum(b) as b by indexname| eval By=round(b, 9)| fields indexname, By`, +} + +var apiDict = map[string]string{ + `SplunkIndexerThroughput`: 
`/services/server/introspection/indexer?output_mode=json`, +} + +type searchResponse struct { + search string + Jobid *string `xml:"sid"` + Return int + Fields []*field `xml:"result>field"` +} + +type field struct { + FieldName string `xml:"k,attr"` + Value string `xml:"value>text"` +} + +// '/services/server/introspection/indexer' +type indexThroughput struct { + Entries []idxTEntry `json:"entry"` +} + +type idxTEntry struct { + Content idxTContent `json:"content"` +} + +type idxTContent struct { + Status string `json:"status"` + AvgKb float64 `json:"average_KBps"` +} diff --git a/receiver/splunkenterprisereceiver/testdata/config.yaml b/receiver/splunkenterprisereceiver/testdata/config.yaml new file mode 100644 index 000000000000..ba27c230bf44 --- /dev/null +++ b/receiver/splunkenterprisereceiver/testdata/config.yaml @@ -0,0 +1,16 @@ +# Example config for the Splunk Enterprise Receiver. + +splunkenterprise: + # required settings + username: "admin" + password: "securityFirst" + endpoint: "https://localhost:8089" + # Optional settings + collection_interval: 10s + max_search_wait_time: 11s + # Also optional: metric settings + metrics: + splunk.license.index.usage: + enabled: true + splunk.indexer.throughput: + enabled: false diff --git a/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml b/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml new file mode 100644 index 000000000000..8685f700eb0e --- /dev/null +++ b/receiver/splunkenterprisereceiver/testdata/scraper/expected.yaml @@ -0,0 +1,19 @@ +resourceMetrics: + - resource: {} + scopeMetrics: + - metrics: + - description: Gauge tracking average bytes per second throughput of indexer + gauge: + dataPoints: + - asDouble: 25579.690815904476 + attributes: + - key: splunk.indexer.status + value: + stringValue: normal + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: splunk.indexer.throughput + unit: By/s + scope: + name: otelcol/splunkenterprisereceiver + version: latest diff --git a/receiver/sqlqueryreceiver/go.mod b/receiver/sqlqueryreceiver/go.mod index f8d2029d35b2..e76b8757d446 100644 --- a/receiver/sqlqueryreceiver/go.mod +++ b/receiver/sqlqueryreceiver/go.mod @@ -38,7 +38,7 @@ require ( github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/andybalholm/brotli v1.0.4 // indirect - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect diff --git a/receiver/sqlqueryreceiver/go.sum b/receiver/sqlqueryreceiver/go.sum index 71fd93551298..69ca39984b1a 100644 --- a/receiver/sqlqueryreceiver/go.sum +++ b/receiver/sqlqueryreceiver/go.sum @@ -37,8 +37,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= 
github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= github.com/apache/thrift v0.19.0 h1:sOqkWPzMj7w6XaYbJQG7m4sGqVolaW/0D28Ln7yPzMk= diff --git a/receiver/syslogreceiver/go.mod b/receiver/syslogreceiver/go.mod index 207bedf79992..b1dc7e41033f 100644 --- a/receiver/syslogreceiver/go.mod +++ b/receiver/syslogreceiver/go.mod @@ -14,7 +14,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/syslogreceiver/go.sum b/receiver/syslogreceiver/go.sum index e11f6952346d..1703ccd6b14f 100644 --- a/receiver/syslogreceiver/go.sum +++ b/receiver/syslogreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/tcplogreceiver/go.mod b/receiver/tcplogreceiver/go.mod index c6c9f49b0e3b..78d820112b9c 100644 --- a/receiver/tcplogreceiver/go.mod +++ b/receiver/tcplogreceiver/go.mod @@ -12,7 +12,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/tcplogreceiver/go.sum b/receiver/tcplogreceiver/go.sum index e11f6952346d..1703ccd6b14f 100644 --- a/receiver/tcplogreceiver/go.sum +++ b/receiver/tcplogreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/udplogreceiver/go.mod b/receiver/udplogreceiver/go.mod index c3b9e786a6ca..17011e7ef918 100644 --- a/receiver/udplogreceiver/go.mod +++ b/receiver/udplogreceiver/go.mod @@ -12,7 +12,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/udplogreceiver/go.sum b/receiver/udplogreceiver/go.sum index 5cb31791432e..70d8cacfdca0 100644 --- a/receiver/udplogreceiver/go.sum +++ b/receiver/udplogreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= diff --git a/receiver/windowseventlogreceiver/go.mod b/receiver/windowseventlogreceiver/go.mod index 73f191dcc5ef..3932a0b2f406 100644 --- a/receiver/windowseventlogreceiver/go.mod +++ b/receiver/windowseventlogreceiver/go.mod @@ -14,7 +14,7 @@ require ( ) require ( - github.com/antonmedv/expr v1.15.1 // indirect + github.com/antonmedv/expr v1.15.2 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect diff --git a/receiver/windowseventlogreceiver/go.sum b/receiver/windowseventlogreceiver/go.sum index 5cb31791432e..70d8cacfdca0 100644 --- a/receiver/windowseventlogreceiver/go.sum +++ b/receiver/windowseventlogreceiver/go.sum @@ -8,8 +8,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.1 h1:mxeRIkH8GQJo4MRRFgp0ArlV4AA+0DmcJNXEsG70rGU= -github.com/antonmedv/expr v1.15.1/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= +github.com/antonmedv/expr v1.15.2 h1:afFXpDWIC2n3bF+kTZE1JvFo+c34uaM3sTqh8z0xfdU= +github.com/antonmedv/expr v1.15.2/go.mod 
h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
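A note on the search-results decoding added in scraper.go and search_result.go above: unmarshallSearchReq depends on the `result>field`, `k,attr`, and `value>text` struct tags to pull the index name and byte count out of Splunk's results XML. The standalone sketch below uses a hypothetical, hand-written payload (not captured from a live Splunk instance) and copies the relevant struct shapes locally so it compiles on its own:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Local copies of the searchResponse/field shapes from search_result.go,
// reduced to the XML-mapped members so this example is self-contained.
type searchResponse struct {
	Jobid  *string  `xml:"sid"`
	Fields []*field `xml:"result>field"`
}

type field struct {
	FieldName string `xml:"k,attr"`
	Value     string `xml:"value>text"`
}

func main() {
	// Hypothetical payload shaped like the Splunk search results XML; a real
	// response carries additional metadata that the scraper ignores.
	payload := `<results preview="0">
  <result offset="0">
    <field k="indexname"><value><text>_internal</text></value></field>
    <field k="By"><value><text>1024.000000000</text></value></field>
  </result>
</results>`

	var sr searchResponse
	if err := xml.Unmarshal([]byte(payload), &sr); err != nil {
		panic(err)
	}
	for _, f := range sr.Fields {
		fmt.Printf("%s=%s\n", f.FieldName, f.Value) // indexname=_internal, By=1024.000000000
	}
}
```

Those two field names, `indexname` and `By`, are exactly the cases scrapeLicenseUsageByIndex switches on when recording `splunk.license.index.usage`.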
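The indexer-throughput path does the same for JSON: the introspection endpoint's `entry`/`content`/`average_KBps` fields decode into indexThroughput, and the reported KB/s value is multiplied by 1000 to produce the By/s unit declared in metadata.yaml. A minimal sketch of that decode-and-convert step, using a payload trimmed down from the test fixture above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the indexThroughput/idxTEntry/idxTContent shapes from
// search_result.go, so the example runs standalone.
type indexThroughput struct {
	Entries []idxTEntry `json:"entry"`
}

type idxTEntry struct {
	Content idxTContent `json:"content"`
}

type idxTContent struct {
	Status string  `json:"status"`
	AvgKb  float64 `json:"average_KBps"`
}

func main() {
	// Reduced form of the mock introspection response in scraper_test.go,
	// keeping only the fields the scraper actually reads.
	payload := `{"entry":[{"content":{"status":"normal","average_KBps":25.579690815904478}}]}`

	var it indexThroughput
	if err := json.Unmarshal([]byte(payload), &it); err != nil {
		panic(err)
	}
	for _, entry := range it.Entries {
		// Same conversion as the scraper: average_KBps * 1000 => bytes per second.
		fmt.Printf("status=%s throughput=%.6f By/s\n", entry.Content.Status, 1000*entry.Content.AvgKb)
	}
}
```

That multiplication is what turns the fixture's 25.579690815904478 average_KBps into the roughly 25579.69 By/s data point recorded in testdata/scraper/expected.yaml.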
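Finally, the polling behaviour in scrapeLicenseUsageByIndex (retry on 204, stop once a 200 arrives with a job id, give up after max_search_wait_time) can be exercised against an httptest server much like scraper_test.go does for the throughput endpoint. The sketch below is only a simplified stand-in for that loop: the handler, status handling, and timings are illustrative and do not use the receiver's real client.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// pollSearch imitates the scraper's search loop: keep polling until the job
// reports results (200) or the maximum wait time elapses.
func pollSearch(client *http.Client, url string, maxWait time.Duration) (int, error) {
	start := time.Now()
	for {
		res, err := client.Get(url)
		if err != nil {
			return 0, err
		}
		res.Body.Close()

		if res.StatusCode == http.StatusOK {
			return res.StatusCode, nil
		}
		if res.StatusCode == http.StatusNoContent {
			time.Sleep(50 * time.Millisecond) // the real scraper sleeps 2s between polls
		}
		if time.Since(start) > maxWait {
			return 0, errors.New("maximum search wait time exceeded")
		}
	}
}

func main() {
	// Mock endpoint that answers 204 twice (search still running) before 200.
	calls := 0
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		calls++
		if calls < 3 {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		w.WriteHeader(http.StatusOK)
	}))
	defer ts.Close()

	code, err := pollSearch(ts.Client(), ts.URL, time.Second)
	fmt.Println(code, err) // 200 <nil>
}
```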