diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index c374bfeb0ef..61f8c6b9e13 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -425,7 +425,7 @@ pipeline { cleanup { notifyBuildResult(prComment: true, analyzeFlakey: !isTag(), jobName: getFlakyJobName(withBranch: (isPR() ? env.CHANGE_TARGET : env.BRANCH_NAME)), - githubIssue: isBranch() && currentBuild.currentResult != "SUCCESS", + githubIssue: false, // Disable creating gh issues for build failures while the E2E tests are stabilized. githubLabels: 'Team:Elastic-Agent-Control-Plane') } } diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 00000000000..bb6e00e40a3 --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,91 @@ +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 90 + +# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. +# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. +daysUntilClose: 90 + +# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable +exemptLabels: + - flaky-test + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: false + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: true + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: true + +# Label to use when marking as stale +staleLabel: Stalled + +# Comment to post when marking as stale. Set to `false` to disable +markComment: > + Hi! + + We just realized that we haven't looked into this issue in a while. We're + sorry! + + + We're labeling this issue as `Stale` to make it hit our filters and + make sure we get back to it as soon as possible. In the meantime, it'd + be extremely helpful if you could take a look at it as well and confirm its + relevance. A simple comment with a nice emoji will be enough `:+1:`. + + Thank you for your contribution! + +# Comment to post when removing the stale label. +# unmarkComment: > +# Your comment here. + +# Comment to post when closing a stale Issue or Pull Request. +# closeComment: > +# Your comment here. + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +# Limit to only `issues` or `pulls` +# only: issues + +# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': +pulls: + daysUntilStale: 60 + daysUntilClose: 30 + markComment: > + Hi! + + We just realized that we haven't looked into this PR in a while. We're + sorry! + + + We're labeling this PR as `Stale` to make it hit our filters and + make sure we get back to it as soon as possible. In the meantime, it'd + be extremely helpful if you could take a look at it as well and confirm its + relevance. A simple comment with a nice emoji will be enough `:+1:`. + + Thank you for your contribution! + + closeComment: > + Hi! + + This PR has been stale for a while and we're going to close it as part of + our cleanup procedure. + + We appreciate your contribution and would like to apologize if we have not + been able to review it, due to the current heavy load of the team. + + Feel free to re-open this PR if you think it should stay open and is worth rebasing. + + Thank you for your contribution! 
+ +# issues: +# exemptLabels: +# - confirmed diff --git a/.github/workflows/add-issues-to-ingest-board.yml b/.github/workflows/add-issues-to-ingest-board.yml new file mode 100644 index 00000000000..499fa012efc --- /dev/null +++ b/.github/workflows/add-issues-to-ingest-board.yml @@ -0,0 +1,53 @@ +name: Add issue to Platform Ingest project + +on: + issues: + types: + - labeled + +env: + INGEST_PROJECT_ID: 'PVT_kwDOAGc3Zs4AEzn4' + DATA_PLANE_LABEL: 'Team:Elastic-Agent-Data-Plane' + CONTROL_PLANE_LABEL: 'Team:Elastic-Agent-Control-Plane' + ELASTIC_AGENT_LABEL: 'Team:Elastic-Agent' + AREA_FIELD_ID: 'PVTSSF_lADOAGc3Zs4AEzn4zgEgZSo' + ELASTIC_AGENT_OPTION_ID: 'c1e1a30a' + +jobs: + add_to_ingest_project: + runs-on: ubuntu-latest + steps: + - uses: octokit/graphql-action@v2.x + id: add_to_project + if: github.event.label.name == env.DATA_PLANE_LABEL || github.event.label.name == env.CONTROL_PLANE_LABEL || github.event.label.name == env.ELASTIC_AGENT_LABEL + with: + query: | + # Variables have to be snake cased because of https://github.com/octokit/graphql-action/issues/164 + mutation AddToIngestProject($project_id: ID!, $content_id: ID!) { + addProjectV2ItemById(input: { projectId: $project_id, contentId: $content_id }) { + item { + id + } + } + } + project_id: ${{ env.INGEST_PROJECT_ID }} + content_id: ${{ github.event.issue.node_id }} + env: + GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} + - uses: octokit/graphql-action@v2.x + id: set_elastic_agent_area + if: github.event.label.name == env.DATA_PLANE_LABEL || github.event.label.name == env.CONTROL_PLANE_LABEL || github.event.label.name == env.ELASTIC_AGENT_LABEL + with: + query: | + mutation updateIngestArea($item_id: ID!, $project_id: ID!, $area_field_id: ID!, $area_id: String) { + updateProjectV2ItemFieldValue( + input: { itemId: $item_id, projectId: $project_id, fieldId: $area_field_id, value: { singleSelectOptionId: $area_id } }) { + clientMutationId + } + } + item_id: ${{ fromJSON(steps.add_to_project.outputs.data).addProjectV2ItemById.item.id }} + project_id: ${{ env.INGEST_PROJECT_ID }} + area_field_id: ${{ env.AREA_FIELD_ID }} + area_id: ${{ env.ELASTIC_AGENT_OPTION_ID }} + env: + GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} diff --git a/.gitignore b/.gitignore index 476cfd50764..57546893fb4 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,7 @@ fleet.enc.lock # Files generated with the bump version automations *.bck + # agent build/ elastic-agent diff --git a/.go-version b/.go-version index d6f3a382b34..1a31d398cf5 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.18.7 +1.18.8 diff --git a/README.md b/README.md index bd0ae71c5fc..3fa2fd6de3a 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,24 @@ The source files for the general Elastic Agent documentation are currently stored in the [observability-docs](https://github.com/elastic/observability-docs) repo. The following docs are only focused on getting developers started building code for Elastic Agent. +### Changelog + +The changelog for the Elastic Agent is generated and maintained using the [elastic-agent-changelog-tool](https://github.com/elastic/elastic-agent-changelog-tool). Read the [installation](https://github.com/elastic/elastic-agent-changelog-tool/blob/main/docs/install.md) +and [usage](https://github.com/elastic/elastic-agent-changelog-tool/blob/main/docs/usage.md#im-a-developer) instructions to get started. + +The changelog tool produces fragment files that are consolidated to generate a changelog for each release. 
Each PR containing a change with user +impact (new feature, bug fix, etc.) must contain a changelog fragment describing the change. There is a GitHub action in CI that will fail +if a PR does not contain a changelog fragment. For PRs that should not have a changelog entry, use the "skip-changelog" label to bypass +this check. + +A simple example of a changelog fragment is below for reference: + +```yml +kind: bug-fix +summary: Fix a panic caused by a race condition when installing the Elastic Agent. +pr: https://github.com/elastic/elastic-agent/pull/823 +``` + ## Testing Prerequisites: @@ -27,6 +45,13 @@ export COMPOSE_DOCKER_CLI_BUILD=0 export DOCKER_DEFAULT_PLATFORM=linux/amd64 ``` +If you are on a Mac with an M1 chip, don't forget to export the following Docker variables to be able to build for AMD64: +``` +export DOCKER_BUILDKIT=0 +export COMPOSE_DOCKER_CLI_BUILD=0 +export DOCKER_DEFAULT_PLATFORM=linux/amd64 +``` + In Linux operating systems that you can not run docker as a root user you need to follow [linux-postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/) ### Testing docker container diff --git a/changelog/8.5.0.yaml b/changelog/8.5.0.yaml new file mode 100644 index 00000000000..dd0502bbf01 --- /dev/null +++ b/changelog/8.5.0.yaml @@ -0,0 +1,129 @@ +version: 8.5.0 +entries: + - kind: bug-fix + summary: Fix a panic caused by a race condition when installing the Elastic Agent. + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/823 + issue: + - https://github.com/elastic/elastic-agent/issues/806 + timestamp: 1660139385 + file: + name: 1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml + checksum: be820e85b12290a895bfd37c2c2245bfae5a70c9 + - kind: breaking-change + summary: Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/832 + issue: [] + timestamp: 1660158319 + file: + name: 1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml + checksum: 2b304d75a687ec7384f3011a55f243fef66e447b + - kind: feature + summary: Add `lumberjack` input type to the Filebeat spec. + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/959 + issue: [] + timestamp: 1661188787 + file: + name: 1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml + checksum: ab7e3af045affbda1522c029cc56a9c5403340c3 + - kind: feature + summary: Add support for hints' based autodiscovery in kubernetes provider. + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/698 + issue: [] + timestamp: 1663143487 + file: + name: 1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml + checksum: 398470d14a8475e93aaf66f96407570653dde71e + - kind: bug-fix + summary: Fix unintended reset of source URI when downloading components + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1252 + issue: [] + timestamp: 1664177394 + file: + name: 1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml + checksum: 1040aceb00b70182c0bba621e15cfe711e32f9fe + - kind: bug-fix + summary: Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. 
+ description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1285 + issue: + - https://github.com/elastic/elastic-agent/issues/1157 + timestamp: 1664212969 + file: + name: 1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml + checksum: 929f1ada47aeaaf9c631091c0f2732f631b3539f + - kind: feature + summary: Improve logging during upgrades. + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1287 + issue: + - https://github.com/elastic/elastic-agent/issues/1253 + timestamp: 1664230732 + file: + name: 1664230732-Improve-logging-during-upgrades.yaml + checksum: 13ab968324d342118c5a257d6c6cc0c5db161b46 + - kind: bug-fix + summary: Add success log message after previous checkin failures + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1327 + issue: [] + timestamp: 1664360554 + file: + name: 1664360554-Add-success-log-message-after-previous-checkin-failures.yaml + checksum: 819a22a452dddfa3b2976433d5cb4c8354a6ccc5 + - kind: bug-fix + summary: Fix docker provider add_fields processors + description: "" + component: providers + pr: + - https://github.com/elastic/elastic-agent/pull/1420 + issue: + - https://github.com/elastic/elastic-agent/issues/29030 + timestamp: 1664989867 + file: + name: 1664989867-fix-docker-provider-processors.yaml + checksum: 45731791cd2e8f1d747abfc47bb256971a77d015 + - kind: enhancement + summary: Improve logging of Fleet check-in errors. + description: Improve logging of Fleet check-in errors and only report the local state as degraded after two consecutive failed check-ins. + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1477 + issue: + - https://github.com/elastic/elastic-agent/issues/1154 + timestamp: 1665517984 + file: + name: 1665517984-improve-checkin-error-logging.yaml + checksum: 2d3dd39309def9a082f794eda815af459596c2e6 + - kind: bug-fix + summary: Fix admin permission check on localized windows + description: "" + component: "" + pr: + - https://github.com/elastic/elastic-agent/pull/1552 + issue: + - https://github.com/elastic/elastic-agent/issues/857 + timestamp: 1666088774 + file: + name: 1666088774-Fix-admin-permission-check-on-localized-windows.yaml + checksum: 16ee5909c319680b8d32045e74c38922eafc29ea diff --git a/changelog/fragments/1665780486-heartbeat-es-output-only.yaml b/changelog/fragments/1665780486-heartbeat-es-output-only.yaml new file mode 100644 index 00000000000..1e3b4059ddf --- /dev/null +++ b/changelog/fragments/1665780486-heartbeat-es-output-only.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Only support elasticsearch as an output for the beta synthetics integration. 
+ +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +# description: + +# Affected component; a word indicating the component this changeset affects. +component: synthetics-integration + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1491 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234 diff --git a/changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml b/changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml new file mode 100644 index 00000000000..c0f13aa3d9c --- /dev/null +++ b/changelog/fragments/1666281194-Fix-how-multiple-Fleet-Server-hosts-are-handled.yaml @@ -0,0 +1,35 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix how multiple Fleet Server hosts are handled + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: It fixes a bug where the Elastic Agent would be enrolled using + a valid Fleet Server URL, but the policy would contain more than one host, with + the first URL unreachable. In that case the Elastic Agent would enroll with + Fleet Server, but become unhealthy as it'd get stuck trying only the first, + unreachable Fleet Server host. + +# Affected component; a word indicating the component this changeset affects. +#component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1329 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: 1234 diff --git a/changelog/fragments/1666611696-fix_service_stop_timeout.yaml b/changelog/fragments/1666611696-fix_service_stop_timeout.yaml new file mode 100644 index 00000000000..5125282618f --- /dev/null +++ b/changelog/fragments/1666611696-fix_service_stop_timeout.yaml @@ -0,0 +1,4 @@ +kind: bug-fix +summary: "Fix: Windows Agent Left Unhealthy After Removing Endpoint Integration" +pr: 1286 +issue: 1262 diff --git a/changelog/fragments/1666789812-Improve-shutdown-logs.yaml b/changelog/fragments/1666789812-Improve-shutdown-logs.yaml new file mode 100644 index 00000000000..091e2570ae6 --- /dev/null +++ b/changelog/fragments/1666789812-Improve-shutdown-logs.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Improve shutdown logs + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: cmd, handler, upgrade + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1618 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: 1358 diff --git a/changelog/fragments/1667243040-Upgrade-node-to-18.12.0.yaml b/changelog/fragments/1667243040-Upgrade-node-to-18.12.0.yaml new file mode 100644 index 00000000000..a670ebab9cb --- /dev/null +++ b/changelog/fragments/1667243040-Upgrade-node-to-18.12.0.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Upgrade node to 18.12.0 + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. 
+component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: 1234 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234 diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml index 007060a5ac0..cbdaf66311f 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -53,44 +53,44 @@ inputs: dataset: activemq.broker type: metrics hosts: - - ${kubernetes.hints.activemq.broker.host|'localhost:8161'} + - ${kubernetes.hints.activemq.broker.host|kubernetes.hints.activemq.host|'localhost:8161'} metricsets: - broker - password: ${kubernetes.hints.activemq.broker.password|'admin'} + password: ${kubernetes.hints.activemq.broker.password|kubernetes.hints.activemq.password|'admin'} path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false - period: ${kubernetes.hints.activemq.broker.period|'10s'} + period: ${kubernetes.hints.activemq.broker.period|kubernetes.hints.activemq.period|'10s'} tags: - forwarded - activemq-broker - username: ${kubernetes.hints.activemq.broker.username|'admin'} + username: ${kubernetes.hints.activemq.broker.username|kubernetes.hints.activemq.username|'admin'} - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true data_stream: dataset: activemq.queue type: metrics hosts: - - ${kubernetes.hints.activemq.queue.host|'localhost:8161'} + - ${kubernetes.hints.activemq.queue.host|kubernetes.hints.activemq.host|'localhost:8161'} metricsets: - queue - password: ${kubernetes.hints.activemq.queue.password|'admin'} + password: ${kubernetes.hints.activemq.queue.password|kubernetes.hints.activemq.password|'admin'} path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false - period: ${kubernetes.hints.activemq.queue.period|'10s'} + period: ${kubernetes.hints.activemq.queue.period|kubernetes.hints.activemq.period|'10s'} tags: - forwarded - activemq-queue - username: ${kubernetes.hints.activemq.queue.username|'admin'} + username: ${kubernetes.hints.activemq.queue.username|kubernetes.hints.activemq.username|'admin'} - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true data_stream: dataset: activemq.topic type: metrics hosts: - - ${kubernetes.hints.activemq.topic.host|'localhost:8161'} + - ${kubernetes.hints.activemq.topic.host|kubernetes.hints.activemq.host|'localhost:8161'} metricsets: - topic - password: ${kubernetes.hints.activemq.topic.password|'admin'} + password: ${kubernetes.hints.activemq.topic.password|kubernetes.hints.activemq.password|'admin'} path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false - period: ${kubernetes.hints.activemq.topic.period|'10s'} + period: ${kubernetes.hints.activemq.topic.period|kubernetes.hints.activemq.period|'10s'} tags: - forwarded - activemq-topic - username: ${kubernetes.hints.activemq.topic.username|'admin'} + username: 
${kubernetes.hints.activemq.topic.username|kubernetes.hints.activemq.username|'admin'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml index a6e461a5363..8dcb71c6434 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -126,9 +126,9 @@ inputs: dataset: apache.status type: metrics hosts: - - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} + - ${kubernetes.hints.apache.status.host|kubernetes.hints.apache.host|'http://127.0.0.1'} metricsets: - status - period: ${kubernetes.hints.apache.status.period|'30s'} + period: ${kubernetes.hints.apache.status.period|kubernetes.hints.apache.period|'30s'} server_status_path: /server-status data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml index bce4edf635c..aafef542628 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -35,7 +35,7 @@ inputs: dataset: cassandra.metrics type: metrics hosts: - - ${kubernetes.hints.cassandra.metrics.host|'localhost:8778'} + - ${kubernetes.hints.cassandra.metrics.host|kubernetes.hints.cassandra.host|'localhost:8778'} jmx.mappings: - attributes: - attr: ReleaseVersion @@ -320,8 +320,8 @@ inputs: metricsets: - jmx namespace: metrics - password: ${kubernetes.hints.cassandra.metrics.password|'admin'} + password: ${kubernetes.hints.cassandra.metrics.password|kubernetes.hints.cassandra.password|'admin'} path: /jolokia/?ignoreErrors=true&canonicalNaming=false - period: ${kubernetes.hints.cassandra.metrics.period|'10s'} - username: ${kubernetes.hints.cassandra.metrics.username|'admin'} + period: ${kubernetes.hints.cassandra.metrics.period|kubernetes.hints.cassandra.period|'10s'} + username: ${kubernetes.hints.cassandra.metrics.username|kubernetes.hints.cassandra.username|'admin'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml index 3e55b02794d..ef637384ddc 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml @@ -9,14 +9,14 @@ inputs: dataset: cockroachdb.status type: metrics hosts: - - ${kubernetes.hints.cockroachdb.status.host|'localhost:8080'} + - ${kubernetes.hints.cockroachdb.status.host|kubernetes.hints.cockroachdb.host|'localhost:8080'} metrics_filters.exclude: null metrics_filters.include: null metrics_path: /_status/vars metricsets: - collector password: null - period: ${kubernetes.hints.cockroachdb.status.period|'10s'} + period: ${kubernetes.hints.cockroachdb.status.period|kubernetes.hints.cockroachdb.period|'10s'} ssl.certificate_authorities: null use_types: true username: null @@ -27,7 +27,7 @@ inputs: streams: - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: cockroachdb.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml index 
49503b63346..82060c4d961 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -180,7 +180,7 @@ inputs: dataset: elasticsearch.stack_monitoring.ccr type: metrics hosts: - - ${kubernetes.hints.elasticsearch.ccr.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.ccr.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - ccr period: null @@ -190,7 +190,7 @@ inputs: dataset: elasticsearch.stack_monitoring.cluster_stats type: metrics hosts: - - ${kubernetes.hints.elasticsearch.cluster_stats.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.cluster_stats.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - cluster_stats period: null @@ -200,7 +200,7 @@ inputs: dataset: elasticsearch.stack_monitoring.enrich type: metrics hosts: - - ${kubernetes.hints.elasticsearch.enrich.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.enrich.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - enrich period: null @@ -210,7 +210,7 @@ inputs: dataset: elasticsearch.stack_monitoring.index type: metrics hosts: - - ${kubernetes.hints.elasticsearch.index.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.index.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - index period: null @@ -220,7 +220,7 @@ inputs: dataset: elasticsearch.stack_monitoring.index_recovery type: metrics hosts: - - ${kubernetes.hints.elasticsearch.index_recovery.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.index_recovery.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - index_recovery period: null @@ -230,7 +230,7 @@ inputs: dataset: elasticsearch.stack_monitoring.index_summary type: metrics hosts: - - ${kubernetes.hints.elasticsearch.index_summary.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.index_summary.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - index_summary period: null @@ -240,7 +240,7 @@ inputs: dataset: elasticsearch.stack_monitoring.ml_job type: metrics hosts: - - ${kubernetes.hints.elasticsearch.ml_job.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.ml_job.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - ml_job period: null @@ -250,7 +250,7 @@ inputs: dataset: elasticsearch.stack_monitoring.node type: metrics hosts: - - ${kubernetes.hints.elasticsearch.node.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.node.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - node period: null @@ -260,7 +260,7 @@ inputs: dataset: elasticsearch.stack_monitoring.node_stats type: metrics hosts: - - ${kubernetes.hints.elasticsearch.node_stats.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.node_stats.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - node_stats period: null @@ -270,7 +270,7 @@ inputs: dataset: elasticsearch.stack_monitoring.pending_tasks type: metrics hosts: - - ${kubernetes.hints.elasticsearch.pending_tasks.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.pending_tasks.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - pending_tasks period: null @@ -280,7 +280,7 @@ inputs: dataset: elasticsearch.stack_monitoring.shard type: metrics hosts: - - 
${kubernetes.hints.elasticsearch.shard.host|'http://localhost:9200'} + - ${kubernetes.hints.elasticsearch.shard.host|kubernetes.hints.elasticsearch.host|'http://localhost:9200'} metricsets: - shard period: null diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml index 178a6098f99..81e0684ae0c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml @@ -5,7 +5,7 @@ inputs: streams: - condition: ${kubernetes.hints.endpoint.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: endpoint.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml index cff5d5821aa..265a6c17863 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -1,4 +1,28 @@ inputs: + - name: filestream-haproxy + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.haproxy.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - haproxy-log + data_stream.namespace: default - name: syslog-haproxy type: syslog use_output: default @@ -24,45 +48,21 @@ inputs: dataset: haproxy.info type: metrics hosts: - - ${kubernetes.hints.haproxy.info.host|'tcp://127.0.0.1:14567'} + - ${kubernetes.hints.haproxy.info.host|kubernetes.hints.haproxy.host|'tcp://127.0.0.1:14567'} metricsets: - info - password: ${kubernetes.hints.haproxy.info.password|'admin'} - period: ${kubernetes.hints.haproxy.info.period|'10s'} - username: ${kubernetes.hints.haproxy.info.username|'admin'} + password: ${kubernetes.hints.haproxy.info.password|kubernetes.hints.haproxy.password|'admin'} + period: ${kubernetes.hints.haproxy.info.period|kubernetes.hints.haproxy.period|'10s'} + username: ${kubernetes.hints.haproxy.info.username|kubernetes.hints.haproxy.username|'admin'} - condition: ${kubernetes.hints.haproxy.stat.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true data_stream: dataset: haproxy.stat type: metrics hosts: - - ${kubernetes.hints.haproxy.stat.host|'tcp://127.0.0.1:14567'} + - ${kubernetes.hints.haproxy.stat.host|kubernetes.hints.haproxy.host|'tcp://127.0.0.1:14567'} metricsets: - stat - password: ${kubernetes.hints.haproxy.stat.password|'admin'} - period: ${kubernetes.hints.haproxy.stat.period|'10s'} - username: ${kubernetes.hints.haproxy.stat.username|'admin'} - data_stream.namespace: default - - name: filestream-haproxy - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true - data_stream: - dataset: haproxy.log - type: logs - exclude_files: - - .gz$ - parsers: - - container: - format: auto - stream: ${kubernetes.hints.haproxy.log.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - 
tags: - - haproxy-log + password: ${kubernetes.hints.haproxy.stat.password|kubernetes.hints.haproxy.password|'admin'} + period: ${kubernetes.hints.haproxy.stat.period|kubernetes.hints.haproxy.period|'10s'} + username: ${kubernetes.hints.haproxy.stat.username|kubernetes.hints.haproxy.username|'admin'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml index 19892110b74..28bfd77da77 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml @@ -61,11 +61,11 @@ inputs: dataset: hashicorp_vault.metrics type: metrics hosts: - - ${kubernetes.hints.hashicorp_vault.metrics.host|'http://localhost:8200'} + - ${kubernetes.hints.hashicorp_vault.metrics.host|kubernetes.hints.hashicorp_vault.host|'http://localhost:8200'} metrics_path: /v1/sys/metrics metricsets: - collector - period: ${kubernetes.hints.hashicorp_vault.metrics.period|'30s'} + period: ${kubernetes.hints.hashicorp_vault.metrics.period|kubernetes.hints.hashicorp_vault.period|'30s'} query: format: prometheus rate_counters: true diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml index 28d8f782d69..4b8faa04e10 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml @@ -1,4 +1,15 @@ inputs: + - name: winlog-hid_bravura_monitor + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.hid_bravura_monitor.winlog.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true + data_stream: + dataset: hid_bravura_monitor.winlog + type: logs + name: Hitachi-Hitachi ID Systems-Hitachi ID Suite/Operational + tags: null + data_stream.namespace: default - name: filestream-hid_bravura_monitor type: filestream use_output: default @@ -29,14 +40,3 @@ inputs: - .gz$ tags: null data_stream.namespace: default - - name: winlog-hid_bravura_monitor - type: winlog - use_output: default - streams: - - condition: ${kubernetes.hints.hid_bravura_monitor.winlog.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true - data_stream: - dataset: hid_bravura_monitor.winlog - type: logs - name: Hitachi-Hitachi ID Systems-Hitachi ID Suite/Operational - tags: null - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml index 44162f4ac6b..8ff2f64baf7 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml @@ -11,6 +11,7 @@ inputs: - .gz$ exclude_lines: - ^# + ignore_older: 72h parsers: - container: format: auto @@ -30,6 +31,7 @@ inputs: - .gz$ exclude_lines: - ^# + ignore_older: 72h parsers: - container: format: auto @@ -53,19 +55,19 @@ inputs: type: metrics metricsets: - application_pool - period: ${kubernetes.hints.iis.application_pool.period|'10s'} + period: ${kubernetes.hints.iis.application_pool.period|kubernetes.hints.iis.period|'10s'} - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true data_stream: dataset: iis.webserver type: metrics metricsets: - webserver - period: 
${kubernetes.hints.iis.webserver.period|'10s'} + period: ${kubernetes.hints.iis.webserver.period|kubernetes.hints.iis.period|'10s'} - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true data_stream: dataset: iis.website type: metrics metricsets: - website - period: ${kubernetes.hints.iis.website.period|'10s'} + period: ${kubernetes.hints.iis.website.period|kubernetes.hints.iis.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml index 02d1d8330d3..25f38c5bf85 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml @@ -1,4 +1,17 @@ inputs: + - name: journald-iptables + type: journald + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + include_matches: + - _TRANSPORT=kernel + tags: + - iptables-log + data_stream.namespace: default - name: udp-iptables type: udp use_output: default @@ -39,16 +52,3 @@ inputs: - iptables-log - forwarded data_stream.namespace: default - - name: journald-iptables - type: journald - use_output: default - streams: - - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true - data_stream: - dataset: iptables.log - type: logs - include_matches: - - _TRANSPORT=kernel - tags: - - iptables-log - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml index b79eebbcfb0..f8e3de9e81a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -39,23 +39,23 @@ inputs: - localhost:8778 metricsets: - broker - period: ${kubernetes.hints.kafka.broker.period|'10s'} + period: ${kubernetes.hints.kafka.broker.period|kubernetes.hints.kafka.period|'10s'} - condition: ${kubernetes.hints.kafka.consumergroup.enabled} == true or ${kubernetes.hints.kafka.enabled} == true data_stream: dataset: kafka.consumergroup type: metrics hosts: - - ${kubernetes.hints.kafka.consumergroup.host|'localhost:9092'} + - ${kubernetes.hints.kafka.consumergroup.host|kubernetes.hints.kafka.host|'localhost:9092'} metricsets: - consumergroup - period: ${kubernetes.hints.kafka.consumergroup.period|'10s'} + period: ${kubernetes.hints.kafka.consumergroup.period|kubernetes.hints.kafka.period|'10s'} - condition: ${kubernetes.hints.kafka.partition.enabled} == true or ${kubernetes.hints.kafka.enabled} == true data_stream: dataset: kafka.partition type: metrics hosts: - - ${kubernetes.hints.kafka.partition.host|'localhost:9092'} + - ${kubernetes.hints.kafka.partition.host|kubernetes.hints.kafka.host|'localhost:9092'} metricsets: - partition - period: ${kubernetes.hints.kafka.partition.period|'10s'} + period: ${kubernetes.hints.kafka.partition.period|kubernetes.hints.kafka.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml index 1c27b4830ab..78ab5f35128 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml @@ -15,16 +15,6 @@ inputs: stream: ${kubernetes.hints.kibana.audit.stream|'all'} paths: - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - - add_fields: - fields: - ecs.version: 1.10.0 - target: "" - - decode_json_fields: - fields: - - message - target: kibana._audit_temp prospector: scanner: symlinks: true @@ -34,19 +24,12 @@ inputs: type: logs exclude_files: - .gz$ - json.add_error_key: true - json.keys_under_root: false parsers: - container: format: auto stream: ${kubernetes.hints.kibana.log.stream|'all'} paths: - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_fields: - fields: - ecs.version: 1.10.0 - target: "" prospector: scanner: symlinks: true @@ -60,7 +43,7 @@ inputs: dataset: kibana.stack_monitoring.cluster_actions type: metrics hosts: - - ${kubernetes.hints.kibana.cluster_actions.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.cluster_actions.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - cluster_actions period: null @@ -69,7 +52,7 @@ inputs: dataset: kibana.stack_monitoring.cluster_rules type: metrics hosts: - - ${kubernetes.hints.kibana.cluster_rules.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.cluster_rules.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - cluster_rules period: null @@ -78,7 +61,7 @@ inputs: dataset: kibana.stack_monitoring.node_actions type: metrics hosts: - - ${kubernetes.hints.kibana.node_actions.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.node_actions.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - node_actions period: null @@ -87,7 +70,7 @@ inputs: dataset: kibana.stack_monitoring.node_rules type: metrics hosts: - - ${kubernetes.hints.kibana.node_rules.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.node_rules.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - node_rules period: null @@ -96,7 +79,7 @@ inputs: dataset: kibana.stack_monitoring.stats type: metrics hosts: - - ${kubernetes.hints.kibana.stats.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.stats.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - stats period: null @@ -105,7 +88,7 @@ inputs: dataset: kibana.stack_monitoring.status type: metrics hosts: - - ${kubernetes.hints.kibana.status.host|'http://localhost:5601'} + - ${kubernetes.hints.kibana.status.host|kubernetes.hints.kibana.host|'http://localhost:5601'} metricsets: - status period: null diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml index 6ba62de3274..f4b3c2a23b3 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml @@ -59,17 +59,17 @@ inputs: dataset: logstash.stack_monitoring.node type: metrics hosts: - - ${kubernetes.hints.logstash.node.host|'http://localhost:9600'} + - ${kubernetes.hints.logstash.node.host|kubernetes.hints.logstash.host|'http://localhost:9600'} metricsets: - node - period: ${kubernetes.hints.logstash.node.period|'10s'} + period: ${kubernetes.hints.logstash.node.period|kubernetes.hints.logstash.period|'10s'} - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true data_stream: dataset: logstash.stack_monitoring.node_stats type: 
metrics hosts: - - ${kubernetes.hints.logstash.node_stats.host|'http://localhost:9600'} + - ${kubernetes.hints.logstash.node_stats.host|kubernetes.hints.logstash.host|'http://localhost:9600'} metricsets: - node_stats - period: ${kubernetes.hints.logstash.node_stats.period|'10s'} + period: ${kubernetes.hints.logstash.node_stats.period|kubernetes.hints.logstash.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml index 5ac70293051..b3215c1cec9 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml @@ -1,16 +1,4 @@ inputs: - - name: winlog-microsoft_sqlserver - type: winlog - use_output: default - streams: - - condition: ${kubernetes.hints.microsoft_sqlserver.audit.enabled} == true or ${kubernetes.hints.microsoft_sqlserver.enabled} == true - data_stream: - dataset: microsoft_sqlserver.audit - type: logs - event_id: 33205 - ignore_older: 72h - name: Security - data_stream.namespace: default - name: filestream-microsoft_sqlserver type: filestream use_output: default @@ -48,10 +36,10 @@ inputs: driver: mssql dynamic_counter_name: Memory Grants Pending hosts: - - sqlserver://${kubernetes.hints.microsoft_sqlserver.performance.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.performance.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.performance.host|'localhost'}:1433 + - sqlserver://${kubernetes.hints.microsoft_sqlserver.performance.username|kubernetes.hints.microsoft_sqlserver.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.performance.password|kubernetes.hints.microsoft_sqlserver.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.performance.host|kubernetes.hints.microsoft_sqlserver.host|'localhost'}:1433 metricsets: - query - period: ${kubernetes.hints.microsoft_sqlserver.performance.period|'60s'} + period: ${kubernetes.hints.microsoft_sqlserver.performance.period|kubernetes.hints.microsoft_sqlserver.period|'60s'} raw_data.enabled: true sql_queries: - query: SELECT cntr_value As 'user_connections' FROM sys.dm_os_performance_counters WHERE counter_name= 'User Connections' @@ -94,10 +82,10 @@ inputs: type: metrics driver: mssql hosts: - - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|'localhost'}:1433 + - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|kubernetes.hints.microsoft_sqlserver.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|kubernetes.hints.microsoft_sqlserver.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|kubernetes.hints.microsoft_sqlserver.host|'localhost'}:1433 metricsets: - query - period: ${kubernetes.hints.microsoft_sqlserver.transaction_log.period|'60s'} + period: ${kubernetes.hints.microsoft_sqlserver.transaction_log.period|kubernetes.hints.microsoft_sqlserver.period|'60s'} raw_data.enabled: true sql_queries: - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=1; @@ -125,3 +113,15 @@ inputs: - query: SELECT 'msdb' As 'database_name', total_log_size_in_bytes As 
total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage msdb response_format: table data_stream.namespace: default + - name: winlog-microsoft_sqlserver + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.audit.enabled} == true or ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.audit + type: logs + event_id: 33205 + ignore_older: 72h + name: Security + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml index 23139e47852..4d78d740508 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml @@ -364,7 +364,7 @@ inputs: streams: - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: mimecast.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml index ece2d4439eb..9804e4f6cb7 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -30,44 +30,44 @@ inputs: dataset: mongodb.collstats type: metrics hosts: - - ${kubernetes.hints.mongodb.collstats.host|'localhost:27017'} + - ${kubernetes.hints.mongodb.collstats.host|kubernetes.hints.mongodb.host|'localhost:27017'} metricsets: - collstats - period: ${kubernetes.hints.mongodb.collstats.period|'10s'} + period: ${kubernetes.hints.mongodb.collstats.period|kubernetes.hints.mongodb.period|'10s'} - condition: ${kubernetes.hints.mongodb.dbstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true data_stream: dataset: mongodb.dbstats type: metrics hosts: - - ${kubernetes.hints.mongodb.dbstats.host|'localhost:27017'} + - ${kubernetes.hints.mongodb.dbstats.host|kubernetes.hints.mongodb.host|'localhost:27017'} metricsets: - dbstats - period: ${kubernetes.hints.mongodb.dbstats.period|'10s'} + period: ${kubernetes.hints.mongodb.dbstats.period|kubernetes.hints.mongodb.period|'10s'} - condition: ${kubernetes.hints.mongodb.metrics.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true data_stream: dataset: mongodb.metrics type: metrics hosts: - - ${kubernetes.hints.mongodb.metrics.host|'localhost:27017'} + - ${kubernetes.hints.mongodb.metrics.host|kubernetes.hints.mongodb.host|'localhost:27017'} metricsets: - metrics - period: ${kubernetes.hints.mongodb.metrics.period|'10s'} + period: ${kubernetes.hints.mongodb.metrics.period|kubernetes.hints.mongodb.period|'10s'} - condition: ${kubernetes.hints.mongodb.replstatus.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true data_stream: dataset: mongodb.replstatus type: metrics hosts: - - ${kubernetes.hints.mongodb.replstatus.host|'localhost:27017'} + - ${kubernetes.hints.mongodb.replstatus.host|kubernetes.hints.mongodb.host|'localhost:27017'} metricsets: - replstatus - period: ${kubernetes.hints.mongodb.replstatus.period|'10s'} + period: ${kubernetes.hints.mongodb.replstatus.period|kubernetes.hints.mongodb.period|'10s'} - condition: ${kubernetes.hints.mongodb.status.enabled} == true or 
${kubernetes.hints.mongodb.enabled} == true data_stream: dataset: mongodb.status type: metrics hosts: - - ${kubernetes.hints.mongodb.status.host|'localhost:27017'} + - ${kubernetes.hints.mongodb.status.host|kubernetes.hints.mongodb.host|'localhost:27017'} metricsets: - status - period: ${kubernetes.hints.mongodb.status.period|'10s'} + period: ${kubernetes.hints.mongodb.status.period|kubernetes.hints.mongodb.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml index 234caeeb40c..aa8ad8e0a02 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml @@ -56,27 +56,32 @@ inputs: dataset: mysql.galera_status type: metrics hosts: - - ${kubernetes.hints.mysql.galera_status.host|'tcp(127.0.0.1:3306)/'} + - ${kubernetes.hints.mysql.galera_status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} metricsets: - galera_status - password: ${kubernetes.hints.mysql.galera_status.password|'test'} - period: ${kubernetes.hints.mysql.galera_status.period|'10s'} - username: ${kubernetes.hints.mysql.galera_status.username|'root'} - - condition: ${kubernetes.hints.mysql.performance.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + password: ${kubernetes.hints.mysql.galera_status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.galera_status.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.galera_status.username|kubernetes.hints.mysql.username|'root'} + - condition: ${kubernetes.hints.mysql.performance.enabled} == true or ${kubernetes.hints.mysql.enabled} == true data_stream: dataset: mysql.performance type: metrics + hosts: + - ${kubernetes.hints.mysql.performance.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} metricsets: - performance + password: ${kubernetes.hints.mysql.performance.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.performance.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.performance.username|kubernetes.hints.mysql.username|'root'} - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true data_stream: dataset: mysql.status type: metrics hosts: - - ${kubernetes.hints.mysql.status.host|'tcp(127.0.0.1:3306)/'} + - ${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'} metricsets: - status - password: ${kubernetes.hints.mysql.status.password|'test'} - period: ${kubernetes.hints.mysql.status.period|'10s'} - username: ${kubernetes.hints.mysql.status.username|'root'} + password: ${kubernetes.hints.mysql.status.password|kubernetes.hints.mysql.password|'test'} + period: ${kubernetes.hints.mysql.status.period|kubernetes.hints.mysql.period|'10s'} + username: ${kubernetes.hints.mysql.status.username|kubernetes.hints.mysql.username|'root'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml index 91525210374..af4da4e87ec 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml @@ -30,53 +30,53 @@ inputs: dataset: nats.connection type: metrics hosts: - - ${kubernetes.hints.nats.connection.host|'localhost:8222'} + - 
${kubernetes.hints.nats.connection.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - connection - period: ${kubernetes.hints.nats.connection.period|'10s'} + period: ${kubernetes.hints.nats.connection.period|kubernetes.hints.nats.period|'10s'} - condition: ${kubernetes.hints.nats.connections.enabled} == true or ${kubernetes.hints.nats.enabled} == true data_stream: dataset: nats.connections type: metrics hosts: - - ${kubernetes.hints.nats.connections.host|'localhost:8222'} + - ${kubernetes.hints.nats.connections.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - connections - period: ${kubernetes.hints.nats.connections.period|'10s'} + period: ${kubernetes.hints.nats.connections.period|kubernetes.hints.nats.period|'10s'} - condition: ${kubernetes.hints.nats.route.enabled} == true and ${kubernetes.hints.nats.enabled} == true data_stream: dataset: nats.route type: metrics hosts: - - ${kubernetes.hints.nats.route.host|'localhost:8222'} + - ${kubernetes.hints.nats.route.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - route - period: ${kubernetes.hints.nats.route.period|'10s'} + period: ${kubernetes.hints.nats.route.period|kubernetes.hints.nats.period|'10s'} - condition: ${kubernetes.hints.nats.routes.enabled} == true or ${kubernetes.hints.nats.enabled} == true data_stream: dataset: nats.routes type: metrics hosts: - - ${kubernetes.hints.nats.routes.host|'localhost:8222'} + - ${kubernetes.hints.nats.routes.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - routes - period: ${kubernetes.hints.nats.routes.period|'10s'} + period: ${kubernetes.hints.nats.routes.period|kubernetes.hints.nats.period|'10s'} - condition: ${kubernetes.hints.nats.stats.enabled} == true or ${kubernetes.hints.nats.enabled} == true data_stream: dataset: nats.stats type: metrics hosts: - - ${kubernetes.hints.nats.stats.host|'localhost:8222'} + - ${kubernetes.hints.nats.stats.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - stats - period: ${kubernetes.hints.nats.stats.period|'10s'} + period: ${kubernetes.hints.nats.stats.period|kubernetes.hints.nats.period|'10s'} - condition: ${kubernetes.hints.nats.subscriptions.enabled} == true or ${kubernetes.hints.nats.enabled} == true data_stream: dataset: nats.subscriptions type: metrics hosts: - - ${kubernetes.hints.nats.subscriptions.host|'localhost:8222'} + - ${kubernetes.hints.nats.subscriptions.host|kubernetes.hints.nats.host|'localhost:8222'} metricsets: - subscriptions - period: ${kubernetes.hints.nats.subscriptions.period|'10s'} + period: ${kubernetes.hints.nats.subscriptions.period|kubernetes.hints.nats.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml index d2bb80601df..7976c094a38 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml @@ -30,7 +30,7 @@ inputs: streams: - condition: ${kubernetes.hints.netflow.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: netflow.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml index a9b6693e372..c42fff19dd3 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -1,19 +1,4 @@ inputs: - - name: nginx/metrics-nginx - type: nginx/metrics - use_output: default - streams: - - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true - data_stream: - dataset: nginx.stubstatus - type: metrics - hosts: - - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'} - metricsets: - - stubstatus - period: ${kubernetes.hints.nginx.stubstatus.period|'10s'} - server_status_path: /nginx_status - data_stream.namespace: default - name: filestream-nginx type: filestream use_output: default @@ -140,3 +125,18 @@ inputs: - forwarded - nginx-error data_stream.namespace: default + - name: nginx/metrics-nginx + type: nginx/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.stubstatus + type: metrics + hosts: + - ${kubernetes.hints.nginx.stubstatus.host|kubernetes.hints.nginx.host|'http://127.0.0.1:80'} + metricsets: + - stubstatus + period: ${kubernetes.hints.nginx.stubstatus.period|kubernetes.hints.nginx.period|'10s'} + server_status_path: /nginx_status + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml index 8e846586d4b..c6a5cb725a3 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -29,16 +29,33 @@ inputs: type: sql/metrics use_output: default streams: + - condition: ${kubernetes.hints.oracle.memory.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.memory + type: metrics + driver: oracle + hosts: + - ${kubernetes.hints.oracle.memory.host|kubernetes.hints.oracle.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + merge_results: true + metricsets: + - query + period: ${kubernetes.hints.oracle.memory.period|kubernetes.hints.oracle.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: select name, value from V$PGASTAT where name in ('aggregate PGA auto target','global memory bound', 'total PGA allocated', 'total PGA used for auto workareas', 'total PGA inuse', 'maximum PGA allocated', 'total freeable PGA memory', 'cache hit percentage', 'aggregate PGA target parameter') + response_format: variables + - query: select 'sga free memory' as NAME, sum(decode(name,'free memory',bytes)) as VALUE from v$sgastat where pool = 'shared pool' union select 'sga total memory' as NAME, sum(bytes) as VALUE from v$sgastat where pool = 'shared pool' + response_format: variables - condition: ${kubernetes.hints.oracle.performance.enabled} == true and ${kubernetes.hints.oracle.enabled} == true data_stream: dataset: oracle.performance type: metrics driver: oracle hosts: - - ${kubernetes.hints.oracle.performance.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + - ${kubernetes.hints.oracle.performance.host|kubernetes.hints.oracle.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} metricsets: - query - period: ${kubernetes.hints.oracle.performance.period|'60s'} + period: ${kubernetes.hints.oracle.performance.period|kubernetes.hints.oracle.period|'60s'} raw_data.enabled: true sql_queries: - query: SELECT name, physical_reads, db_block_gets, consistent_gets, 1 - (physical_reads / 
(db_block_gets + consistent_gets)) "Hit_Ratio" FROM V$BUFFER_POOL_STATISTICS @@ -49,6 +66,12 @@ inputs: response_format: table - query: SELECT 'lock_requests' "Ratio" , AVG(gethitratio) FROM V$LIBRARYCACHE UNION SELECT 'pin_requests' "Ratio", AVG(pinhitratio) FROM V$LIBRARYCACHE UNION SELECT 'io_reloads' "Ratio", (SUM(reloads) / SUM(pins)) FROM V$LIBRARYCACHE response_format: variables + - query: SELECT COUNT(*) as "failed_db_jobs" FROM dba_jobs WHERE NVL(failures, 0) < > 0 + response_format: table + - query: select 'active_session_count' as name, count(s.status) as value from gv$session s, v$process p where p.addr=s.paddr and s.status='ACTIVE' union select 'inactive_session_count' as name, count(s.status) as value from gv$session s, v$process p where p.addr=s.paddr and s.status='INACTIVE' union select 'inactive_morethan_onehr' as name, count(s.status) as value from gv$session s, v$process p where p.addr=s.paddr and s.last_call_et > 3600 and s.status='INACTIVE' + response_format: variables + - query: select WAIT_CLASS, TOTAL_WAITS, round(100 * (TOTAL_WAITS / SUM_WAITS),2) PCT_WAITS, ROUND((TIME_WAITED / 100),2) TIME_WAITED_SECS, round(100 * (TIME_WAITED / SUM_TIME),2) PCT_TIME from (select WAIT_CLASS, TOTAL_WAITS, TIME_WAITED from V$SYSTEM_WAIT_CLASS where WAIT_CLASS != 'Idle'), (select sum(TOTAL_WAITS) SUM_WAITS, sum(TIME_WAITED) SUM_TIME from V$SYSTEM_WAIT_CLASS where WAIT_CLASS != 'Idle') order by 5 desc + response_format: table - condition: ${kubernetes.hints.oracle.sysmetric.enabled} == true and ${kubernetes.hints.oracle.enabled} == true data_stream: dataset: oracle.sysmetric @@ -56,14 +79,28 @@ inputs: driver: oracle dynamic_metric_name_filter: '%' hosts: - - ${kubernetes.hints.oracle.sysmetric.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + - ${kubernetes.hints.oracle.sysmetric.host|kubernetes.hints.oracle.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} metricsets: - query - period: ${kubernetes.hints.oracle.sysmetric.period|'60s'} + period: ${kubernetes.hints.oracle.sysmetric.period|kubernetes.hints.oracle.period|'60s'} raw_data.enabled: true sql_queries: - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' response_format: variables + - condition: ${kubernetes.hints.oracle.system_statistics.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.system_statistics + type: metrics + driver: oracle + hosts: + - ${kubernetes.hints.oracle.system_statistics.host|kubernetes.hints.oracle.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.system_statistics.period|kubernetes.hints.oracle.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT NAME, VALUE FROM V$SYSSTAT WHERE NAME IN ( 'bytes received via SQL*Net from client', 'bytes received via SQL*Net from dblink', 'bytes sent via SQL*Net to client', 'bytes sent via SQL*Net to dblink', 'CPU used by this session', 'db block changes', 'db block gets from cache', 'DBWR checkpoint buffers written', 'DBWR checkpoints', 'DML statements parallelized', 'enqueue conversions', 'enqueue deadlocks', 'enqueue releases', 'enqueue requests', 'enqueue timeouts', 'enqueue waits', 'exchange deadlocks', 'execute count', 'gc current block receive time', 'index fast full scans (direct read)', 'index fast full scans (full)', 'index fast full scans (rowid ranges)', 'lob reads', 'lob writes', 'logons current', 'opened cursors 
current', 'Parallel operations not downgraded', 'parse count (hard)', 'parse count (total)', 'parse time cpu', 'parse time elapsed', 'physical read bytes', 'physical read IO requests', 'physical read total bytes', 'physical read total IO requests', 'physical reads', 'physical write bytes', 'physical write IO requests', 'physical write total bytes', 'physical write total IO requests', 'physical writes', 'physical writes direct', 'physical writes from cache', 'process last non-idle time', 'queries parallelized', 'recovery blocks read', 'recursive calls', 'recursive cpu usage', 'redo blocks written', 'redo buffer allocation retries', 'redo log space requests', 'redo log space wait time', 'redo size', 'redo synch time', 'redo write time', 'redo writes', 'session cursor cache count', 'session cursor cache hits', 'session logical reads', 'session stored procedure space', 'sorts (disk)', 'sorts (memory)', 'sorts (rows)', 'table scan rows gotten', 'table scans (direct read)', 'table scans (long tables)', 'table scans (rowid ranges)', 'transaction rollbacks', 'user calls', 'user commits', 'user rollbacks', 'DB time', 'OS System time used', 'OS User time used', 'SMON posted for instance recovery', 'SMON posted for txn recovery for other instances', 'java call heap live size', 'java call heap total size', 'java call heap used size') + response_format: variables - condition: ${kubernetes.hints.oracle.tablespace.enabled} == true and ${kubernetes.hints.oracle.enabled} == true data_stream: dataset: oracle.tablespace @@ -71,10 +108,10 @@ inputs: driver: oracle dynamic_metric_name_filter: "" hosts: - - ${kubernetes.hints.oracle.tablespace.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + - ${kubernetes.hints.oracle.tablespace.host|kubernetes.hints.oracle.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} metricsets: - query - period: ${kubernetes.hints.oracle.tablespace.period|'60s'} + period: ${kubernetes.hints.oracle.tablespace.period|kubernetes.hints.oracle.period|'60s'} raw_data.enabled: true sql_queries: - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM dba_temp_free_space WHERE tablespace_name = 'TEMP'), details AS (SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS 
TOTAL_BYTES FROM details diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml index 93c07883f03..8cea3505601 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -1,4 +1,37 @@ inputs: + - name: filestream-panw + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + exclude_files: + - .gz$ + fields: + _conf: + external_zones: + - untrust + internal_zones: + - trust + tz_offset: local + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.panw.panos.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - panw-panos + - forwarded + data_stream.namespace: default - name: tcp-panw type: tcp use_output: default @@ -59,36 +92,3 @@ inputs: - panw-panos - forwarded data_stream.namespace: default - - name: filestream-panw - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true - data_stream: - dataset: panw.panos - type: logs - exclude_files: - - .gz$ - fields: - _conf: - external_zones: - - untrust - internal_zones: - - trust - tz_offset: local - fields_under_root: true - parsers: - - container: - format: auto - stream: ${kubernetes.hints.panw.panos.stream|'all'} - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - processors: - - add_locale: null - prospector: - scanner: - symlinks: true - tags: - - panw-panos - - forwarded - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml index ec6a58fd9b2..bbd2aebfa4b 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml @@ -73,7 +73,7 @@ inputs: streams: - condition: ${kubernetes.hints.panw_cortex_xdr.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: panw_cortex_xdr.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml index e4541f90639..3a52d749ed7 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml @@ -45,7 +45,7 @@ inputs: streams: - condition: ${kubernetes.hints.pfsense.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: pfsense.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml index a9abf518a9a..8b40d2524d2 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -34,35 +34,35 @@ inputs: dataset: postgresql.activity type: metrics hosts: - - 
${kubernetes.hints.postgresql.activity.host|'postgres://localhost:5432'} + - ${kubernetes.hints.postgresql.activity.host|kubernetes.hints.postgresql.host|'postgres://localhost:5432'} metricsets: - activity - period: ${kubernetes.hints.postgresql.activity.period|'10s'} + period: ${kubernetes.hints.postgresql.activity.period|kubernetes.hints.postgresql.period|'10s'} - condition: ${kubernetes.hints.postgresql.bgwriter.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true data_stream: dataset: postgresql.bgwriter type: metrics hosts: - - ${kubernetes.hints.postgresql.bgwriter.host|'postgres://localhost:5432'} + - ${kubernetes.hints.postgresql.bgwriter.host|kubernetes.hints.postgresql.host|'postgres://localhost:5432'} metricsets: - bgwriter - period: ${kubernetes.hints.postgresql.bgwriter.period|'10s'} + period: ${kubernetes.hints.postgresql.bgwriter.period|kubernetes.hints.postgresql.period|'10s'} - condition: ${kubernetes.hints.postgresql.database.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true data_stream: dataset: postgresql.database type: metrics hosts: - - ${kubernetes.hints.postgresql.database.host|'postgres://localhost:5432'} + - ${kubernetes.hints.postgresql.database.host|kubernetes.hints.postgresql.host|'postgres://localhost:5432'} metricsets: - database - period: ${kubernetes.hints.postgresql.database.period|'10s'} + period: ${kubernetes.hints.postgresql.database.period|kubernetes.hints.postgresql.period|'10s'} - condition: ${kubernetes.hints.postgresql.statement.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true data_stream: dataset: postgresql.statement type: metrics hosts: - - ${kubernetes.hints.postgresql.statement.host|'postgres://localhost:5432'} + - ${kubernetes.hints.postgresql.statement.host|kubernetes.hints.postgresql.host|'postgres://localhost:5432'} metricsets: - statement - period: ${kubernetes.hints.postgresql.statement.period|'10s'} + period: ${kubernetes.hints.postgresql.statement.period|kubernetes.hints.postgresql.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml index 2a7e630c9cf..1bb26ac4da2 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -3,38 +3,35 @@ inputs: type: prometheus/metrics use_output: default streams: - - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - condition: ${kubernetes.hints.prometheus.collector.enabled} == true or ${kubernetes.hints.prometheus.enabled} == true + - condition: ${kubernetes.hints.prometheus.collector.enabled} == true or ${kubernetes.hints.prometheus.enabled} == true data_stream: dataset: prometheus.collector type: metrics hosts: - - ${kubernetes.hints.prometheus.collector.host|'localhost:9090'} + - ${kubernetes.hints.prometheus.collector.host|kubernetes.hints.prometheus.host|'localhost:9090'} metrics_filters.exclude: null metrics_filters.include: null metrics_path: /metrics metricsets: - collector - password: ${kubernetes.hints.prometheus.collector.password|'secret'} - period: ${kubernetes.hints.prometheus.collector.period|'10s'} + password: ${kubernetes.hints.prometheus.collector.password|kubernetes.hints.prometheus.password|'secret'} + period: ${kubernetes.hints.prometheus.collector.period|kubernetes.hints.prometheus.period|'10s'} rate_counters: true - ssl.certificate_authorities: - - 
/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt use_types: true - username: ${kubernetes.hints.prometheus.collector.username|'user'} + username: ${kubernetes.hints.prometheus.collector.username|kubernetes.hints.prometheus.username|'user'} - condition: ${kubernetes.hints.prometheus.query.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true data_stream: dataset: prometheus.query type: metrics hosts: - - ${kubernetes.hints.prometheus.query.host|'localhost:9090'} + - ${kubernetes.hints.prometheus.query.host|kubernetes.hints.prometheus.host|'localhost:9090'} metricsets: - query - period: ${kubernetes.hints.prometheus.query.period|'10s'} + period: ${kubernetes.hints.prometheus.query.period|kubernetes.hints.prometheus.period|'10s'} queries: - name: instant_vector params: - query: sum(rate(prometheus_http_requests_total[1m])) + query: sum(rate(prometheus_http_requests_total[2m])) path: /api/v1/query - name: range_vector params: @@ -73,7 +70,7 @@ inputs: streams: - condition: ${kubernetes.hints.prometheus.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: prometheus.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml index 546faa79901..3d517763b74 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml @@ -1,4 +1,23 @@ inputs: + - name: tcp-qnap_nas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true or ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default - name: udp-qnap_nas type: udp use_output: default @@ -24,7 +43,7 @@ inputs: streams: - condition: ${kubernetes.hints.qnap_nas.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: qnap_nas.container_logs type: logs exclude_files: [] exclude_lines: [] @@ -39,22 +58,3 @@ inputs: symlinks: true tags: [] data_stream.namespace: default - - name: tcp-qnap_nas - type: tcp - use_output: default - streams: - - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true or ${kubernetes.hints.qnap_nas.enabled} == true - data_stream: - dataset: qnap_nas.log - type: logs - host: localhost:9301 - processors: - - add_locale: null - - add_fields: - fields: - tz_offset: local - target: _tmp - tags: - - qnap-nas - - forwarded - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml index 942c4fa6911..53701dfa769 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -36,44 +36,44 @@ inputs: dataset: rabbitmq.connection type: metrics hosts: - - ${kubernetes.hints.rabbitmq.connection.host|'localhost:15672'} + - ${kubernetes.hints.rabbitmq.connection.host|kubernetes.hints.rabbitmq.host|'localhost:15672'} metricsets: - connection - password: ${kubernetes.hints.rabbitmq.connection.password|''} - period: ${kubernetes.hints.rabbitmq.connection.period|'10s'} - username: 
${kubernetes.hints.rabbitmq.connection.username|''} + password: ${kubernetes.hints.rabbitmq.connection.password|kubernetes.hints.rabbitmq.password|''} + period: ${kubernetes.hints.rabbitmq.connection.period|kubernetes.hints.rabbitmq.period|'10s'} + username: ${kubernetes.hints.rabbitmq.connection.username|kubernetes.hints.rabbitmq.username|''} - condition: ${kubernetes.hints.rabbitmq.exchange.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true data_stream: dataset: rabbitmq.exchange type: metrics hosts: - - ${kubernetes.hints.rabbitmq.exchange.host|'localhost:15672'} + - ${kubernetes.hints.rabbitmq.exchange.host|kubernetes.hints.rabbitmq.host|'localhost:15672'} metricsets: - exchange - password: ${kubernetes.hints.rabbitmq.exchange.password|''} - period: ${kubernetes.hints.rabbitmq.exchange.period|'10s'} - username: ${kubernetes.hints.rabbitmq.exchange.username|''} + password: ${kubernetes.hints.rabbitmq.exchange.password|kubernetes.hints.rabbitmq.password|''} + period: ${kubernetes.hints.rabbitmq.exchange.period|kubernetes.hints.rabbitmq.period|'10s'} + username: ${kubernetes.hints.rabbitmq.exchange.username|kubernetes.hints.rabbitmq.username|''} - condition: ${kubernetes.hints.rabbitmq.node.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true data_stream: dataset: rabbitmq.node type: metrics hosts: - - ${kubernetes.hints.rabbitmq.node.host|'localhost:15672'} + - ${kubernetes.hints.rabbitmq.node.host|kubernetes.hints.rabbitmq.host|'localhost:15672'} metricsets: - node node.collect: node - password: ${kubernetes.hints.rabbitmq.node.password|''} - period: ${kubernetes.hints.rabbitmq.node.period|'10s'} - username: ${kubernetes.hints.rabbitmq.node.username|''} + password: ${kubernetes.hints.rabbitmq.node.password|kubernetes.hints.rabbitmq.password|''} + period: ${kubernetes.hints.rabbitmq.node.period|kubernetes.hints.rabbitmq.period|'10s'} + username: ${kubernetes.hints.rabbitmq.node.username|kubernetes.hints.rabbitmq.username|''} - condition: ${kubernetes.hints.rabbitmq.queue.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true data_stream: dataset: rabbitmq.queue type: metrics hosts: - - ${kubernetes.hints.rabbitmq.queue.host|'localhost:15672'} + - ${kubernetes.hints.rabbitmq.queue.host|kubernetes.hints.rabbitmq.host|'localhost:15672'} metricsets: - queue - password: ${kubernetes.hints.rabbitmq.queue.password|''} - period: ${kubernetes.hints.rabbitmq.queue.period|'10s'} - username: ${kubernetes.hints.rabbitmq.queue.username|''} + password: ${kubernetes.hints.rabbitmq.queue.password|kubernetes.hints.rabbitmq.password|''} + period: ${kubernetes.hints.rabbitmq.queue.period|kubernetes.hints.rabbitmq.period|'10s'} + username: ${kubernetes.hints.rabbitmq.queue.username|kubernetes.hints.rabbitmq.username|''} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml index 31731f6c1a5..d8db78aee6d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -32,8 +32,8 @@ inputs: dataset: redis.slowlog type: logs hosts: - - ${kubernetes.hints.redis.slowlog.host|'127.0.0.1:6379'} - password: ${kubernetes.hints.redis.slowlog.password|''} + - ${kubernetes.hints.redis.slowlog.host|kubernetes.hints.redis.host|'127.0.0.1:6379'} + password: ${kubernetes.hints.redis.slowlog.password|kubernetes.hints.redis.password|''} data_stream.namespace: default - name: 
redis/metrics-redis type: redis/metrics @@ -44,20 +44,20 @@ inputs: dataset: redis.info type: metrics hosts: - - ${kubernetes.hints.redis.info.host|'127.0.0.1:6379'} + - ${kubernetes.hints.redis.info.host|kubernetes.hints.redis.host|'127.0.0.1:6379'} idle_timeout: 20s maxconn: 10 metricsets: - info network: tcp - password: ${kubernetes.hints.redis.info.password|''} - period: ${kubernetes.hints.redis.info.period|'10s'} + password: ${kubernetes.hints.redis.info.password|kubernetes.hints.redis.password|''} + period: ${kubernetes.hints.redis.info.period|kubernetes.hints.redis.period|'10s'} - condition: ${kubernetes.hints.redis.key.enabled} == true or ${kubernetes.hints.redis.enabled} == true data_stream: dataset: redis.key type: metrics hosts: - - ${kubernetes.hints.redis.key.host|'127.0.0.1:6379'} + - ${kubernetes.hints.redis.key.host|kubernetes.hints.redis.host|'127.0.0.1:6379'} idle_timeout: 20s key.patterns: - limit: 20 @@ -66,19 +66,19 @@ inputs: metricsets: - key network: tcp - password: ${kubernetes.hints.redis.key.password|''} - period: ${kubernetes.hints.redis.key.period|'10s'} + password: ${kubernetes.hints.redis.key.password|kubernetes.hints.redis.password|''} + period: ${kubernetes.hints.redis.key.period|kubernetes.hints.redis.period|'10s'} - condition: ${kubernetes.hints.redis.keyspace.enabled} == true or ${kubernetes.hints.redis.enabled} == true data_stream: dataset: redis.keyspace type: metrics hosts: - - ${kubernetes.hints.redis.keyspace.host|'127.0.0.1:6379'} + - ${kubernetes.hints.redis.keyspace.host|kubernetes.hints.redis.host|'127.0.0.1:6379'} idle_timeout: 20s maxconn: 10 metricsets: - keyspace network: tcp - password: ${kubernetes.hints.redis.keyspace.password|''} - period: ${kubernetes.hints.redis.keyspace.period|'10s'} + password: ${kubernetes.hints.redis.keyspace.password|kubernetes.hints.redis.password|''} + period: ${kubernetes.hints.redis.keyspace.period|kubernetes.hints.redis.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml index 990a4372e8b..aee90809a9f 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml @@ -5,7 +5,7 @@ inputs: streams: - condition: ${kubernetes.hints.security_detection_engine.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: security_detection_engine.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml index 7c06b222d78..5401309fbe1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -200,7 +200,7 @@ inputs: streams: - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: sentinel_one.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml index aef353751ec..4f857c2233c 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml @@ -1,25 +1,4 @@ inputs: - - name: filestream-snyk - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.snyk.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default - name: httpjson-snyk type: httpjson use_output: default @@ -137,3 +116,24 @@ inputs: - forwarded - snyk-vulnerabilities data_stream.namespace: default + - name: filestream-snyk + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.container_logs.enabled} == true + data_stream: + dataset: snyk.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml index 9fdee28a731..ea77d57ed81 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -31,26 +31,26 @@ inputs: dataset: stan.channels type: metrics hosts: - - ${kubernetes.hints.stan.channels.host|'localhost:8222'} + - ${kubernetes.hints.stan.channels.host|kubernetes.hints.stan.host|'localhost:8222'} metricsets: - channels - period: ${kubernetes.hints.stan.channels.period|'60s'} + period: ${kubernetes.hints.stan.channels.period|kubernetes.hints.stan.period|'60s'} - condition: ${kubernetes.hints.stan.stats.enabled} == true or ${kubernetes.hints.stan.enabled} == true data_stream: dataset: stan.stats type: metrics hosts: - - ${kubernetes.hints.stan.stats.host|'localhost:8222'} + - ${kubernetes.hints.stan.stats.host|kubernetes.hints.stan.host|'localhost:8222'} metricsets: - stats - period: ${kubernetes.hints.stan.stats.period|'60s'} + period: ${kubernetes.hints.stan.stats.period|kubernetes.hints.stan.period|'60s'} - condition: ${kubernetes.hints.stan.subscriptions.enabled} == true or ${kubernetes.hints.stan.enabled} == true data_stream: dataset: stan.subscriptions type: metrics hosts: - - ${kubernetes.hints.stan.subscriptions.host|'localhost:8222'} + - ${kubernetes.hints.stan.subscriptions.host|kubernetes.hints.stan.host|'localhost:8222'} metricsets: - subscriptions - period: ${kubernetes.hints.stan.subscriptions.period|'60s'} + period: ${kubernetes.hints.stan.subscriptions.period|kubernetes.hints.stan.period|'60s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml index 8e3ca7ce297..fac3f6cbd93 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -1,23 +1,4 @@ inputs: - - name: udp-symantec_endpoint - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true - data_stream: - dataset: symantec_endpoint.log - type: logs - fields: - 
_conf: - remove_mapped_fields: false - tz_offset: UTC - fields_under_root: true - host: localhost:9008 - max_message_size: 1 MiB - tags: - - symantec-endpoint-log - - forwarded - data_stream.namespace: default - name: filestream-symantec_endpoint type: filestream use_output: default @@ -65,3 +46,22 @@ inputs: - symantec-endpoint-log - forwarded data_stream.namespace: default + - name: udp-symantec_endpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml index 2f375b1a3f0..7f754909d1a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -1,4 +1,25 @@ inputs: + - name: filestream-synthetics + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.synthetics.container_logs.enabled} == true + data_stream: + dataset: synthetics.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default - name: synthetics/http-synthetics type: synthetics/http use_output: default @@ -37,7 +58,7 @@ inputs: dataset: tcp type: synthetics enabled: true - hosts: ${kubernetes.hints.synthetics.tcp.host|''} + hosts: ${kubernetes.hints.synthetics.tcp.host|kubernetes.hints.synthetics.host|''} name: null processors: - add_observer_metadata: @@ -62,7 +83,7 @@ inputs: dataset: icmp type: synthetics enabled: true - hosts: ${kubernetes.hints.synthetics.icmp.host|''} + hosts: ${kubernetes.hints.synthetics.icmp.host|kubernetes.hints.synthetics.host|''} name: null processors: - add_observer_metadata: @@ -125,24 +146,3 @@ inputs: monitor.fleet_managed: true target: "" data_stream.namespace: default - - name: filestream-synthetics - type: filestream - use_output: default - streams: - - condition: ${kubernetes.hints.synthetics.container_logs.enabled} == true - data_stream: - dataset: kubernetes.container_logs - type: logs - exclude_files: [] - exclude_lines: [] - parsers: - - container: - format: auto - stream: all - paths: - - /var/log/containers/*${kubernetes.hints.container_id}.log - prospector: - scanner: - symlinks: true - tags: [] - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml index 34c8d0d984e..0f20d16dfd1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml @@ -1,11 +1,21 @@ inputs: + - name: tcp-tcp + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.generic.enabled} == true or ${kubernetes.hints.tcp.enabled} == true + data_stream: + dataset: tcp.generic + type: logs + host: localhost:8080 + data_stream.namespace: default - name: filestream-tcp type: filestream use_output: 
default streams: - condition: ${kubernetes.hints.tcp.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: tcp.container_logs type: logs exclude_files: [] exclude_lines: [] @@ -20,13 +30,3 @@ inputs: symlinks: true tags: [] data_stream.namespace: default - - name: tcp-tcp - type: tcp - use_output: default - streams: - - condition: ${kubernetes.hints.tcp.generic.enabled} == true or ${kubernetes.hints.tcp.enabled} == true - data_stream: - dataset: tcp.generic - type: logs - host: localhost:8080 - data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml index 4ab26982389..15b7ffbbba9 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -30,8 +30,8 @@ inputs: dataset: traefik.health type: metrics hosts: - - ${kubernetes.hints.traefik.health.host|'localhost:8080'} + - ${kubernetes.hints.traefik.health.host|kubernetes.hints.traefik.host|'localhost:8080'} metricsets: - health - period: ${kubernetes.hints.traefik.health.period|'10s'} + period: ${kubernetes.hints.traefik.health.period|kubernetes.hints.traefik.period|'10s'} data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml index 60fa5ebf598..10883414be4 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml @@ -1,22 +1,11 @@ inputs: - - name: udp-udp - type: udp - use_output: default - streams: - - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true - data_stream: - dataset: udp.generic - type: logs - host: localhost:8080 - max_message_size: 10KiB - data_stream.namespace: default - name: filestream-udp type: filestream use_output: default streams: - condition: ${kubernetes.hints.udp.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: udp.container_logs type: logs exclude_files: [] exclude_lines: [] @@ -31,3 +20,14 @@ inputs: symlinks: true tags: [] data_stream.namespace: default + - name: udp-udp + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true + data_stream: + dataset: udp.generic + type: logs + host: localhost:8080 + max_message_size: 10KiB + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml index 5199734c315..411d454e031 100644 --- a/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml @@ -8,28 +8,28 @@ inputs: dataset: zookeeper.connection type: metrics hosts: - - ${kubernetes.hints.zookeeper.connection.host|'localhost:2181'} + - ${kubernetes.hints.zookeeper.connection.host|kubernetes.hints.zookeeper.host|'localhost:2181'} metricsets: - connection - period: ${kubernetes.hints.zookeeper.connection.period|'10s'} + period: ${kubernetes.hints.zookeeper.connection.period|kubernetes.hints.zookeeper.period|'10s'} - condition: ${kubernetes.hints.zookeeper.mntr.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true data_stream: dataset: zookeeper.mntr type: metrics 
hosts: - - ${kubernetes.hints.zookeeper.mntr.host|'localhost:2181'} + - ${kubernetes.hints.zookeeper.mntr.host|kubernetes.hints.zookeeper.host|'localhost:2181'} metricsets: - mntr - period: ${kubernetes.hints.zookeeper.mntr.period|'10s'} + period: ${kubernetes.hints.zookeeper.mntr.period|kubernetes.hints.zookeeper.period|'10s'} - condition: ${kubernetes.hints.zookeeper.server.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true data_stream: dataset: zookeeper.server type: metrics hosts: - - ${kubernetes.hints.zookeeper.server.host|'localhost:2181'} + - ${kubernetes.hints.zookeeper.server.host|kubernetes.hints.zookeeper.host|'localhost:2181'} metricsets: - server - period: ${kubernetes.hints.zookeeper.server.period|'10s'} + period: ${kubernetes.hints.zookeeper.server.period|kubernetes.hints.zookeeper.period|'10s'} data_stream.namespace: default - name: filestream-zookeeper type: filestream @@ -37,7 +37,7 @@ inputs: streams: - condition: ${kubernetes.hints.zookeeper.container_logs.enabled} == true data_stream: - dataset: kubernetes.container_logs + dataset: zookeeper.container_logs type: logs exclude_files: [] exclude_lines: [] diff --git a/dev-tools/packaging/templates/docker/docker-entrypoint.tmpl b/dev-tools/packaging/templates/docker/docker-entrypoint.tmpl deleted file mode 100644 index f073e21e318..00000000000 --- a/dev-tools/packaging/templates/docker/docker-entrypoint.tmpl +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -# Check if the the user has invoked the image with flags. -# eg. "{{ .BeatName }} -c {{ .BeatName }}.yml" -if [[ -z $1 ]] || [[ ${1:0:1} == '-' ]] ; then - exec {{ .BeatName }} "$@" -else - # They may be looking for a Beat subcommand, like "{{ .BeatName }} setup". - subcommands=$({{ .BeatName }} help \ - | awk 'BEGIN {RS=""; FS="\n"} /Available Commands:/' \ - | awk '/^\s+/ {print $1}') - - # If we _did_ get a subcommand, pass it to {{ .BeatName }}. - for subcommand in $subcommands; do - if [[ $1 == $subcommand ]]; then - exec {{ .BeatName }} "$@" - fi - done -fi - -# If neither of those worked, then they have specified the binary they want, so -# just do exactly as they say. 
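A pattern repeated across the templates.d changes above is that every hint variable gains an integration-level fallback before its hardcoded default, for example `${kubernetes.hints.mysql.status.host|kubernetes.hints.mysql.host|'tcp(127.0.0.1:3306)/'}`: the alternatives are tried left to right and the first one that resolves wins, so the data-stream-scoped hint takes precedence, then the integration-wide hint, and the quoted literal acts as the final default. This lets a single integration-wide hint cover all of an integration's data streams while per-data-stream hints still override it. A minimal sketch of that resolution order, using a hypothetical helper rather than the agent's actual template engine:

```go
package main

import "fmt"

// resolveHint mirrors the ${datasetHint|integrationHint|'default'} fallback order:
// the first non-empty alternative wins. Illustrative only; the agent resolves these
// expressions through its policy variable substitution, not through this function.
func resolveHint(datasetHint, integrationHint, literalDefault string) string {
	if datasetHint != "" {
		return datasetHint
	}
	if integrationHint != "" {
		return integrationHint
	}
	return literalDefault
}

func main() {
	// Only the integration-wide hint is set: it is used instead of the literal default.
	fmt.Println(resolveHint("", "tcp(10.0.0.5:3306)/", "tcp(127.0.0.1:3306)/"))
	// No hints at all: the literal default from the template applies.
	fmt.Println(resolveHint("", "", "tcp(127.0.0.1:3306)/"))
}
```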
-exec "$@" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index 0913b484712..e899f71e2fb 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "sort" "time" @@ -150,14 +151,17 @@ func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Con err, "fail to create API client with updated hosts", errors.TypeNetwork, errors.M("hosts", h.config.Fleet.Client.Hosts)) } + ctx, cancel := context.WithTimeout(ctx, apiStatusTimeout) defer cancel() - resp, err := client.Send(ctx, "GET", "/api/status", nil, nil, nil) + + resp, err := client.Send(ctx, http.MethodGet, "/api/status", nil, nil, nil) if err != nil { return errors.New( - err, "fail to communicate with updated API client hosts", + err, "fail to communicate with Fleet Server API client hosts", errors.TypeNetwork, errors.M("hosts", h.config.Fleet.Client.Hosts)) } + // discard body for proper cancellation and connection reuse _, _ = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() @@ -165,15 +169,17 @@ func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Con reader, err := fleetToReader(h.agentInfo, h.config) if err != nil { return errors.New( - err, "fail to persist updated API client hosts", + err, "fail to persist new Fleet Server API client hosts", errors.TypeUnexpected, errors.M("hosts", h.config.Fleet.Client.Hosts)) } + err = h.store.Save(reader) if err != nil { return errors.New( - err, "fail to persist updated API client hosts", + err, "fail to persist new Fleet Server API client hosts", errors.TypeFilesystem, errors.M("hosts", h.config.Fleet.Client.Hosts)) } + for _, setter := range h.setters { setter.SetClient(client) } diff --git a/internal/pkg/agent/cmd/common.go b/internal/pkg/agent/cmd/common.go index 7406419d405..7639b345ff7 100644 --- a/internal/pkg/agent/cmd/common.go +++ b/internal/pkg/agent/cmd/common.go @@ -69,7 +69,7 @@ func NewCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command { cmd.AddCommand(newDiagnosticsCommand(args, streams)) cmd.AddCommand(newComponentCommandWithArgs(args, streams)) - // windows special hidden sub-command (only added on windows) + // windows special hidden sub-command (only added on Windows) reexec := newReExecWindowsCommand(args, streams) if reexec != nil { cmd.AddCommand(reexec) diff --git a/internal/pkg/fleetapi/client/client.go b/internal/pkg/fleetapi/client/client.go index 4470f0259a8..0f478497bb6 100644 --- a/internal/pkg/fleetapi/client/client.go +++ b/internal/pkg/fleetapi/client/client.go @@ -87,7 +87,7 @@ func NewWithConfig(log *logger.Logger, cfg remote.Config) (*remote.Client, error // ExtractError extracts error from a fleet-server response func ExtractError(resp io.Reader) error { - // Lets try to extract a high level fleet-server error. + // Let's try to extract a high level fleet-server error. 
e := &struct { StatusCode int `json:"statusCode"` Error string `json:"error"` diff --git a/internal/pkg/remote/client.go b/internal/pkg/remote/client.go index 085ab2bfe0e..5c8fd5c9a34 100644 --- a/internal/pkg/remote/client.go +++ b/internal/pkg/remote/client.go @@ -6,14 +6,17 @@ package remote import ( "context" + "fmt" "io" + "math/rand" "net/http" "net/url" + "sort" "strings" "sync" "time" - "github.com/pkg/errors" + "github.com/hashicorp/go-multierror" urlutil "github.com/elastic/elastic-agent-libs/kibana" "github.com/elastic/elastic-agent-libs/transport/httpcommon" @@ -26,33 +29,32 @@ const ( retryOnBadConnTimeout = 5 * time.Minute ) -type requestFunc func(string, string, url.Values, io.Reader) (*http.Request, error) type wrapperFunc func(rt http.RoundTripper) (http.RoundTripper, error) type requestClient struct { - request requestFunc + host string client http.Client lastUsed time.Time lastErr error lastErrOcc time.Time } -// Client wraps an http.Client and takes care of making the raw calls, the client should -// stay simple and specificals should be implemented in external action instead of adding new methods -// to the client. For authenticated calls or sending fields on every request, create customer RoundTripper -// implementations that will take care of the boiler plates. +// Client wraps a http.Client and takes care of making the raw calls, the client should +// stay simple and specifics should be implemented in external action instead of adding new methods +// to the client. For authenticated calls or sending fields on every request, create a custom RoundTripper +// implementation that will take care of the boilerplate. type Client struct { - log *logger.Logger - lock sync.Mutex - clients []*requestClient - config Config + log *logger.Logger + clientLock sync.Mutex + clients []*requestClient + config Config } // NewConfigFromURL returns a Config based on a received host. 
func NewConfigFromURL(URL string) (Config, error) { u, err := url.Parse(URL) if err != nil { - return Config{}, errors.Wrap(err, "could not parse url") + return Config{}, fmt.Errorf("could not parse url: %w", err) } c := DefaultClientConfig() @@ -76,7 +78,7 @@ func NewWithRawConfig(log *logger.Logger, config *config.Config, wrapper wrapper cfg := Config{} if err := config.Unpack(&cfg); err != nil { - return nil, errors.Wrap(err, "invalidate configuration") + return nil, fmt.Errorf("invalidate configuration: %w", err) } return NewWithConfig(l, cfg, wrapper) @@ -97,11 +99,14 @@ func NewWithConfig(log *logger.Logger, cfg Config, wrapper wrapperFunc) (*Client } hosts := cfg.GetHosts() - clients := make([]*requestClient, len(hosts)) - for i, host := range cfg.GetHosts() { - connStr, err := urlutil.MakeURL(string(cfg.Protocol), p, host, 0) + hostCount := len(hosts) + log.With("hosts", hosts).Debugf( + "creating remote client with %d hosts", hostCount) + clients := make([]*requestClient, hostCount) + for i, host := range hosts { + baseURL, err := urlutil.MakeURL(string(cfg.Protocol), p, host, 0) if err != nil { - return nil, errors.Wrap(err, "invalid fleet-server endpoint") + return nil, fmt.Errorf("invalid fleet-server endpoint: %w", err) } transport, err := cfg.Transport.RoundTripper( @@ -115,7 +120,7 @@ func NewWithConfig(log *logger.Logger, cfg Config, wrapper wrapperFunc) (*Client if wrapper != nil { transport, err = wrapper(transport) if err != nil { - return nil, errors.Wrap(err, "fail to create transport client") + return nil, fmt.Errorf("fail to create transport client: %w", err) } } @@ -125,17 +130,17 @@ func NewWithConfig(log *logger.Logger, cfg Config, wrapper wrapperFunc) (*Client } clients[i] = &requestClient{ - request: prefixRequestFactory(connStr), - client: httpClient, + host: baseURL, + client: httpClient, } } - return new(log, cfg, clients...) + return newClient(log, cfg, clients...) } -// Send executes a direct calls against the API, the method will takes cares of cloning -// also add necessary headers for likes: "Content-Type", "Accept", and "kbn-xsrf". -// No assumptions is done on the response concerning the received format, this will be the responsibility +// Send executes a direct calls against the API, the method will take care of cloning and +// also adding the necessary headers likes: "Content-Type", "Accept", and "kbn-xsrf". +// No assumptions are done on the response concerning the received format, this will be the responsibility // of the implementation to correctly unpack any received data. // // NOTE: @@ -155,45 +160,62 @@ func (c *Client) Send( } c.log.Debugf("Request method: %s, path: %s, reqID: %s", method, path, reqID) - c.lock.Lock() - defer c.lock.Unlock() - requester := c.nextRequester() + c.clientLock.Lock() + defer c.clientLock.Unlock() - req, err := requester.request(method, path, params, body) - if err != nil { - return nil, errors.Wrapf(err, "fail to create HTTP request using method %s to %s", method, path) - } + var resp *http.Response + var multiErr error - // Add generals headers to the request, we are dealing exclusively with JSON. - // Content-Type / Accepted type can be override from the called. - req.Header.Set("Content-Type", "application/json") - req.Header.Add("Accept", "application/json") - // This header should be specific to fleet-server or remove it - req.Header.Set("kbn-xsrf", "1") // Without this Kibana will refuse to answer the request. 
+ c.sortClients() + for i, requester := range c.clients { + req, err := requester.newRequest(method, path, params, body) + if err != nil { + return nil, fmt.Errorf( + "fail to create HTTP request using method %s to %s: %w", + method, path, err) + } - // If available, add the request id as an HTTP header - if reqID != "" { - req.Header.Add("X-Request-ID", reqID) - } + // Add generals headers to the request, we are dealing exclusively with JSON. + // Content-Type / Accepted type can be overridden by the caller. + req.Header.Set("Content-Type", "application/json") + req.Header.Add("Accept", "application/json") + // This header should be specific to fleet-server or remove it + req.Header.Set("kbn-xsrf", "1") // Without this Kibana will refuse to answer the request. - // copy headers. - for header, values := range headers { - for _, v := range values { - req.Header.Add(header, v) + // If available, add the request id as an HTTP header + if reqID != "" { + req.Header.Add("X-Request-ID", reqID) } - } - requester.lastUsed = time.Now().UTC() + // copy headers. + for header, values := range headers { + for _, v := range values { + req.Header.Add(header, v) + } + } + + requester.lastUsed = time.Now().UTC() + + resp, err = requester.client.Do(req.WithContext(ctx)) + if err != nil { + requester.lastErr = err + requester.lastErrOcc = time.Now().UTC() + + msg := fmt.Sprintf("requester %d/%d to host %s errored", + i, len(c.clients), requester.host) + multiErr = multierror.Append(multiErr, fmt.Errorf("%s: %w", msg, err)) + + // Using debug level as the error is only relevant if all clients fail. + c.log.With("error", err).Debugf(msg) + continue + } - resp, err := requester.client.Do(req.WithContext(ctx)) - if err != nil { - requester.lastErr = err - requester.lastErrOcc = time.Now().UTC() - } else { requester.lastErr = nil requester.lastErrOcc = time.Time{} + return resp, nil } - return resp, err + + return nil, fmt.Errorf("all hosts failed: %w", multiErr) } // URI returns the remote URI. @@ -202,67 +224,78 @@ func (c *Client) URI() string { return string(c.config.Protocol) + "://" + host + "/" + c.config.Path } -// new creates new API client. -func new( +// newClient creates a new API client. +func newClient( log *logger.Logger, cfg Config, - httpClients ...*requestClient, + clients ...*requestClient, ) (*Client, error) { + // Shuffle so all the agents don't access the hosts in the same order + rand.Shuffle(len(clients), func(i, j int) { + clients[i], clients[j] = clients[j], clients[i] + }) + c := &Client{ log: log, - clients: httpClients, + clients: clients, config: cfg, } return c, nil } -// nextRequester returns the requester to use. -// -// It excludes clients that have errored in the last 5 minutes. -func (c *Client) nextRequester() *requestClient { - var selected *requestClient - +// sortClients sort the clients according to the following priority: +// - never used +// - without errors, last used first when more than one does not have errors +// - last errored. +// It also removes the last error after retryOnBadConnTimeout has elapsed. 
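The reworked Send above no longer picks a single requester: once sortClients has ordered the hosts, it walks them in turn and only gives up when every host has failed, accumulating the individual failures with hashicorp/go-multierror. A rough, self-contained sketch of that failover loop (simplified; trySend is a hypothetical name, not part of the agent):

```go
package remotesketch

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/go-multierror"
)

// trySend walks the already-sorted hosts in priority order and returns the first
// successful response. Failures are accumulated so that, when every host fails,
// the caller gets one error wrapping all of them ("all hosts failed: ...").
// Simplified, hypothetical sketch of the pattern, not the agent's actual code.
func trySend(hosts []string, do func(host string) (*http.Response, error)) (*http.Response, error) {
	var errs error
	for i, host := range hosts {
		resp, err := do(host)
		if err != nil {
			errs = multierror.Append(errs, fmt.Errorf("requester %d/%d to host %s errored: %w", i, len(hosts), host, err))
			continue
		}
		return resp, nil
	}
	return nil, fmt.Errorf("all hosts failed: %w", errs)
}
```

Because the hosts are sorted first, recently errored hosts sink to the back of the list and are only retried after the healthier ones, or once retryOnBadConnTimeout has cleared their last error.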
+func (c *Client) sortClients() { now := time.Now().UTC() - for _, requester := range c.clients { - if requester.lastErr != nil && now.Sub(requester.lastErrOcc) > retryOnBadConnTimeout { - requester.lastErr = nil - requester.lastErrOcc = time.Time{} + + sort.Slice(c.clients, func(i, j int) bool { + // First, set them good if the timout has elapsed + if c.clients[i].lastErr != nil && + now.Sub(c.clients[i].lastErrOcc) > retryOnBadConnTimeout { + c.clients[i].lastErr = nil + c.clients[i].lastErrOcc = time.Time{} } - if requester.lastErr != nil { - continue + if c.clients[j].lastErr != nil && + now.Sub(c.clients[j].lastErrOcc) > retryOnBadConnTimeout { + c.clients[j].lastErr = nil + c.clients[j].lastErrOcc = time.Time{} } - if requester.lastUsed.IsZero() { - // never been used, instant winner! - selected = requester - break + + // Pick not yet used first, but if both haven't been used yet, + // we return false to comply with the sort.Interface definition. + if c.clients[i].lastUsed.IsZero() && + c.clients[j].lastUsed.IsZero() { + return false } - if selected == nil { - selected = requester - continue + + // Pick not yet used first + if c.clients[i].lastUsed.IsZero() { + return true } - if requester.lastUsed.Before(selected.lastUsed) { - selected = requester + + // If none has errors, pick the last used + // Then, the one without errors + if c.clients[i].lastErr == nil && + c.clients[j].lastErr == nil { + return c.clients[i].lastUsed.Before(c.clients[j].lastUsed) } - } - if selected == nil { - // all are erroring; select the oldest one that errored - for _, requester := range c.clients { - if selected == nil { - selected = requester - continue - } - if requester.lastErrOcc.Before(selected.lastErrOcc) { - selected = requester - } + + // Then, the one without error + if c.clients[i].lastErr == nil { + return true } - } - return selected + + // Lastly, the one that errored last + return c.clients[i].lastUsed.Before(c.clients[j].lastUsed) + }) } -func prefixRequestFactory(URL string) requestFunc { - return func(method, path string, params url.Values, body io.Reader) (*http.Request, error) { - path = strings.TrimPrefix(path, "/") - newPath := strings.Join([]string{URL, path, "?", params.Encode()}, "") - return http.NewRequest(method, newPath, body) //nolint:noctx // keep old behaviour - } +func (r requestClient) newRequest(method string, path string, params url.Values, body io.Reader) (*http.Request, error) { + path = strings.TrimPrefix(path, "/") + newPath := strings.Join([]string{r.host, path, "?", params.Encode()}, "") + + return http.NewRequest(method, newPath, body) } diff --git a/internal/pkg/remote/client_test.go b/internal/pkg/remote/client_test.go index 6ea546f8128..887bc9817b2 100644 --- a/internal/pkg/remote/client_test.go +++ b/internal/pkg/remote/client_test.go @@ -58,7 +58,8 @@ func TestPortDefaults(t *testing.T) { c, err := NewWithConfig(l, cfg, nil) require.NoError(t, err) - r, err := c.nextRequester().request("GET", "/", nil, strings.NewReader("")) + c.sortClients() + r, err := c.clients[0].newRequest(http.MethodGet, "/", nil, strings.NewReader("")) require.NoError(t, err) if tc.ExpectedPort > 0 { @@ -77,13 +78,13 @@ func TestHTTPClient(t *testing.T) { l, err := logger.New("", false) require.NoError(t, err) + const successResp = `{"message":"hello"}` t.Run("Guard against double slashes on path", withServer( func(t *testing.T) *http.ServeMux { - msg := `{ message: "hello" }` mux := http.NewServeMux() mux.HandleFunc("/nested/echo-hello", func(w http.ResponseWriter, r *http.Request) { 
w.WriteHeader(http.StatusOK) - fmt.Fprint(w, msg) + fmt.Fprint(w, successResp) }) return addCatchAll(mux, t) }, func(t *testing.T, host string) { @@ -97,23 +98,22 @@ func TestHTTPClient(t *testing.T) { client, err := NewWithConfig(l, c, noopWrapper) require.NoError(t, err) - resp, err := client.Send(ctx, "GET", "/nested/echo-hello", nil, nil, nil) + resp, err := client.Send(ctx, http.MethodGet, "/nested/echo-hello", nil, nil, nil) require.NoError(t, err) body, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, `{ message: "hello" }`, string(body)) + assert.Equal(t, successResp, string(body)) }, )) t.Run("Simple call", withServer( func(t *testing.T) *http.ServeMux { - msg := `{ message: "hello" }` mux := http.NewServeMux() mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprint(w, msg) + fmt.Fprint(w, successResp) }) return mux }, func(t *testing.T, host string) { @@ -123,23 +123,22 @@ func TestHTTPClient(t *testing.T) { client, err := NewWithRawConfig(nil, cfg, nil) require.NoError(t, err) - resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil) require.NoError(t, err) body, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, `{ message: "hello" }`, string(body)) + assert.Equal(t, successResp, string(body)) }, )) t.Run("Simple call with a prefix path", withServer( func(t *testing.T) *http.ServeMux { - msg := `{ message: "hello" }` mux := http.NewServeMux() mux.HandleFunc("/mycustompath/echo-hello", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprint(w, msg) + fmt.Fprint(w, successResp) }) return mux }, func(t *testing.T, host string) { @@ -150,23 +149,62 @@ func TestHTTPClient(t *testing.T) { client, err := NewWithRawConfig(nil, cfg, nil) require.NoError(t, err) - resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) + resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil) require.NoError(t, err) body, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, `{ message: "hello" }`, string(body)) + assert.Equal(t, successResp, string(body)) }, )) + t.Run("Tries all the hosts", withServer( + func(t *testing.T) *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, successResp) + }) + return mux + }, func(t *testing.T, host string) { + one := &requestClient{host: "http://must.fail-1.co/"} + two := &requestClient{host: "http://must.fail-2.co/"} + three := &requestClient{host: fmt.Sprintf("http://%s/", host)} + + c := &Client{clients: []*requestClient{one, two, three}, log: l} + require.NoError(t, err) + resp, err := c.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, successResp, string(body)) + }, + )) + + t.Run("Return last error", func(t *testing.T) { + client := &Client{ + log: l, + clients: []*requestClient{ + {host: "http://must.fail-1.co/"}, + {host: "http://must.fail-2.co/"}, + {host: "http://must.fail-3.co/"}, + }} + + resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil) + assert.Contains(t, 
+		assert.Nil(t, resp)
+	})
+
 	t.Run("Custom user agent", withServer(
 		func(t *testing.T) *http.ServeMux {
-			msg := `{ message: "hello" }`
 			mux := http.NewServeMux()
 			mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusOK)
-				fmt.Fprint(w, msg)
+				fmt.Fprint(w, successResp)
 				require.Equal(t, r.Header.Get("User-Agent"), "custom-agent")
 			})
 			return mux
@@ -180,23 +218,22 @@ func TestHTTPClient(t *testing.T) {
 			})
 			require.NoError(t, err)
 
-			resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil)
+			resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil)
 			require.NoError(t, err)
 
 			body, err := ioutil.ReadAll(resp.Body)
 			require.NoError(t, err)
 			defer resp.Body.Close()
-			assert.Equal(t, `{ message: "hello" }`, string(body))
+			assert.Equal(t, successResp, string(body))
 		},
 	))
 
 	t.Run("Allows to debug HTTP request between a client and a server", withServer(
 		func(t *testing.T) *http.ServeMux {
-			msg := `{ "message": "hello" }`
 			mux := http.NewServeMux()
 			mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusOK)
-				fmt.Fprint(w, msg)
+				fmt.Fprint(w, successResp)
 			})
 			return mux
 		}, func(t *testing.T, host string) {
@@ -212,16 +249,16 @@ func TestHTTPClient(t *testing.T) {
 			})
 			require.NoError(t, err)
 
-			resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, bytes.NewBuffer([]byte("hello")))
+			resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, bytes.NewBuffer([]byte("hello")))
 			require.NoError(t, err)
 
 			body, err := ioutil.ReadAll(resp.Body)
 			require.NoError(t, err)
 			defer resp.Body.Close()
-			assert.Equal(t, `{ "message": "hello" }`, string(body))
+			assert.Equal(t, successResp, string(body))
 
 			for _, m := range debugger.messages {
-				fmt.Println(m)
+				fmt.Println(m) //nolint:forbidigo // printing debug messages in a test.
 			}
 
 			assert.Equal(t, 1, len(debugger.messages))
@@ -230,11 +267,10 @@
 
 	t.Run("RequestId", withServer(
 		func(t *testing.T) *http.ServeMux {
-			msg := `{ message: "hello" }`
 			mux := http.NewServeMux()
 			mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusOK)
-				fmt.Fprint(w, msg)
+				fmt.Fprint(w, successResp)
 				require.NotEmpty(t, r.Header.Get("X-Request-ID"))
 			})
 			return mux
@@ -245,48 +281,58 @@ func TestHTTPClient(t *testing.T) {
 			client, err := NewWithRawConfig(nil, cfg, nil)
 			require.NoError(t, err)
 
-			resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil)
+			resp, err := client.Send(ctx, http.MethodGet, "/echo-hello", nil, nil, nil)
 			require.NoError(t, err)
 
 			body, err := ioutil.ReadAll(resp.Body)
 			require.NoError(t, err)
 			defer resp.Body.Close()
-			assert.Equal(t, `{ message: "hello" }`, string(body))
+			assert.Equal(t, successResp, string(body))
 		},
 	))
 }
 
-func TestNextRequester(t *testing.T) {
+func TestSortClients(t *testing.T) {
 	t.Run("Picks first requester on initial call", func(t *testing.T) {
 		one := &requestClient{}
 		two := &requestClient{}
-		client, err := new(nil, Config{}, one, two)
+		client, err := newClient(nil, Config{}, one, two)
 		require.NoError(t, err)
-		assert.Equal(t, one, client.nextRequester())
+
+		client.sortClients()
+
+		assert.Equal(t, one, client.clients[0])
 	})
 
 	t.Run("Picks second requester when first has error", func(t *testing.T) {
 		one := &requestClient{
+			lastUsed:   time.Now().UTC(),
 			lastErr:    fmt.Errorf("fake error"),
 			lastErrOcc: time.Now().UTC(),
 		}
 		two := &requestClient{}
-		client, err := new(nil, Config{}, one, two)
+		client, err := newClient(nil, Config{}, one, two)
 		require.NoError(t, err)
-		assert.Equal(t, two, client.nextRequester())
+
+		client.sortClients()
+
+		assert.Equal(t, two, client.clients[0])
 	})
 
-	t.Run("Picks second requester when first has used", func(t *testing.T) {
+	t.Run("Picks second requester when first has been used", func(t *testing.T) {
 		one := &requestClient{
 			lastUsed: time.Now().UTC(),
 		}
 		two := &requestClient{}
-		client, err := new(nil, Config{}, one, two)
+		client, err := newClient(nil, Config{}, one, two)
 		require.NoError(t, err)
-		assert.Equal(t, two, client.nextRequester())
+
+		client.sortClients()
+
+		assert.Equal(t, two, client.clients[0])
 	})
 
-	t.Run("Picks second requester when its oldest", func(t *testing.T) {
+	t.Run("Picks second requester when it's the oldest", func(t *testing.T) {
 		one := &requestClient{
 			lastUsed: time.Now().UTC().Add(-time.Minute),
 		}
@@ -296,12 +342,15 @@ func TestNextRequester(t *testing.T) {
 		three := &requestClient{
 			lastUsed: time.Now().UTC().Add(-2 * time.Minute),
 		}
-		client, err := new(nil, Config{}, one, two, three)
+		client, err := newClient(nil, Config{}, one, two, three)
 		require.NoError(t, err)
-		assert.Equal(t, two, client.nextRequester())
+
+		client.sortClients()
+
+		assert.Equal(t, two, client.clients[0])
 	})
 
-	t.Run("Picks third requester when its second has error and first is last used", func(t *testing.T) {
+	t.Run("Picks third requester when second has error and first is last used", func(t *testing.T) {
 		one := &requestClient{
 			lastUsed: time.Now().UTC().Add(-time.Minute),
 		}
@@ -313,9 +362,11 @@ func TestNextRequester(t *testing.T) {
 		three := &requestClient{
 			lastUsed: time.Now().UTC().Add(-2 * time.Minute),
 		}
-		client, err := new(nil, Config{}, one, two, three)
-		require.NoError(t, err)
-		assert.Equal(t, three, client.nextRequester())
+		client := &Client{clients: []*requestClient{one, two, three}}
+
+		client.sortClients()
+
+		assert.Equal(t, three, client.clients[0])
 	})
 
 	t.Run("Picks second requester when its oldest and all have old errors", func(t *testing.T) {
@@ -334,9 +385,12 @@ func TestNextRequester(t *testing.T) {
 			lastErr:    fmt.Errorf("fake error"),
 			lastErrOcc: time.Now().Add(-2 * time.Minute),
 		}
-		client, err := new(nil, Config{}, one, two, three)
+		client, err := newClient(nil, Config{}, one, two, three)
 		require.NoError(t, err)
-		assert.Equal(t, two, client.nextRequester())
+
+		client.sortClients()
+
+		assert.Equal(t, two, client.clients[0])
 	})
 }
diff --git a/pkg/component/component.go b/pkg/component/component.go
index d65fcfec9a4..fa0a5d6e9f5 100644
--- a/pkg/component/component.go
+++ b/pkg/component/component.go
@@ -7,6 +7,7 @@ package component
 import (
 	"fmt"
 	"os"
+	"sort"
 	"strings"
 
 	"github.com/elastic/elastic-agent-client/v7/pkg/client"
@@ -164,9 +165,17 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp
 		return nil, nil, err
 	}
 
+	// order output keys; ensures result is always the same order
+	outputKeys := make([]string, 0, len(outputsMap))
+	for k := range outputsMap {
+		outputKeys = append(outputKeys, k)
+	}
+	sort.Strings(outputKeys)
+
 	var components []Component
 	componentIdsInputMap := make(map[string]string)
-	for outputName, output := range outputsMap {
+	for _, outputName := range outputKeys {
+		output := outputsMap[outputName]
 		if !output.enabled {
 			// skip; not enabled
 			continue
diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 1c415537ad4..4f4b5225e47 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-cae815eb-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-f20b7179-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
    #  https://github.com/docker/compose/issues/8962
    #  healthcheck:
@@ -42,7 +42,7 @@ services:
       - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.6.0-cae815eb-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.6.0-f20b7179-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"
diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc
index 0485d65c441..28c84d61f3d 100644
--- a/version/docs/version.asciidoc
+++ b/version/docs/version.asciidoc
@@ -1,6 +1,6 @@
 :stack-version: 8.3.0
 :doc-branch: main
-:go-version: 1.18.7
+:go-version: 1.18.8
 :release-state: unreleased
 :python: 3.7
 :docker: 1.12