From 97461f3645ee0be348bebe670929c8c62760541e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 13 Jan 2022 11:40:09 -0500 Subject: [PATCH 01/30] [Elastic Agent] Add doc on how Fleet Server is bootstrapped. (#29563) (#29842) * Add doc on how Fleet Server is bootstrapped by Elastic Agent. * Add port 8221. * Update with suggessions. * Apply suggestions from code review Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> (cherry picked from commit a7f8517716541d466f66448f4538fd9002c1016b) Co-authored-by: Blake Rouse --- .../docs/fleet-server-bootstrap.asciidoc | 90 +++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc diff --git a/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc b/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc new file mode 100644 index 00000000000..1dc818f38d2 --- /dev/null +++ b/x-pack/elastic-agent/docs/fleet-server-bootstrap.asciidoc @@ -0,0 +1,90 @@ +[[fleet-server-bootstrap]] +== Fleet Server Bootstrap + +Elastic Agent with Fleet Server has a bootstrap process that it uses to get +Fleet Server up and running under Elastic Agent. + +Elastic Agent will bootstrap a Fleet Server when the `--fleet-server-es` +command-line option is provided to an `install` or `enroll` command. In this mode +Elastic Agent will only communicate with its local Fleet Server and expose +the Fleet Server over the `:8221` port. + +The `:8221` port is reserved for communication between the Elastic Agent and the +Fleet Server on the host. It is bound to the localhost of the machine/container +and cannot be accessed remotely. This ensures that the local Elastic Agent has +priority in check-ins with the Fleet Server. 
The `:8220` port is bound on +`0.0.0.0` to allow remote connections from external Elastic Agents that wish to +enroll and communicate. + +[float] +[[fleet-server-operating-modes]] +=== Operating Modes + +Elastic Agent can bootstrap the Fleet Server into three different modes. The mode +determines how Fleet Server exposes itself over the `:8220` port, but does not change +any other behaviour. + +==== Self-signed Certificate + +With the standard `--fleet-server-es` and `--fleet-server-service-token` options the +Elastic Agent will generate a CA and certificate for communication with +the Fleet Server that it starts. These certificates are generated +by Elastic Agent and passed to the Fleet Server, with Elastic Agent using the host's +hostname in the communication URL for valid TLS verification. + +==== HTTP Only + +Using the `--insecure` and `--fleet-server-insecure-http` will bootstrap the Fleet Server +without any certificates, it will be bound to `localhost:8220` and Elastic Agent will +communicate in clear-text. + +==== Custom Certificates (aka. Production) + +When deploying Elastic Agent in a production environment using enterprise generated +certificates will ensure that Elastic Agent running locally and remote Elastic Agent +will be able to connect over a verified TLS based connection. Certificates are specified +with `--fleet-server-cert`, `--fleet-server-cert-ca`, and `--certificate-authorities`. + +[float] +[[fleet-server-bootstrap-process]] +=== How Does It Bootstrap + +Bootstrapping is run during the `enroll` command. The `install` command +or the `container` command (used by Docker container) will call the `enroll` +command to perform the bootstrapping process. + +==== Install Command + +When the `install` command is executed it places the Elastic Agent in the correct file +paths based on the operating system then starts the Elastic Agent service. The +`enroll` command is then executed by the `install` command. 
+ +==== Container Command + +When the `container` command is executed it first copies the `data/downloads` directory +into a state path (`STATE_PATH`) then it executes the `enroll` command. + +==== Enroll Command + +This is where all the actual work of bootstrapping is performed. + +. A new `fleet.yml` is written with `fleet.server.*` options set along with +`fleet.server.bootstrap: true`. +. `enroll` command then either triggers a restart or spawns an Elastic Agent daemon. +.. First it checks if there is a running Elastic Agent daemon using the control socket. +In the case that there is a running Elastic Agent daemon it will trigger a restart through +the control socket. +.. In the case that there is no running Elastic Agent daemon a subprocess with the `run` +command will be started. The `enroll` command will then wait for the process to be up and +running by monitoring it with the control socket. +. The `status` command will then be polled through the control socket waiting for the +`fleet-server` application to be reported as `degraded`. `degraded` is reported because +the `fleet-server` is started without an `agent.id`. +. Once `fleet-server` is degraded the `enroll` command then uses localhost to communicate +with the running `fleet-server` to complete enrollment. This is the same enrollment used +by the Elastic Agent to a remote Fleet Server. +. A new `fleet.yml` is written with the enrollment information including its `agent.id` and +its API key to use for communication. The new `fleet.yml` still includes the `fleet.server.*`, +but this time the `fleet.server.bootstrap: false` is set. +. `enroll` command then either restarts the running Elastic Agent daemon if one was running +from Step 2, or it stops the spawned `run` subprocess and returns. 
From fecc0c2927eec674763725fd55e8dea9b67e9b77 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 13 Jan 2022 22:00:45 +0000 Subject: [PATCH 02/30] CI: add GitHub actions for macos (push based) (#29032) (#29845) (cherry picked from commit 6c408847901281f983ad01a2dc8c84309f437267) Co-authored-by: Victor Martinez --- .github/workflows/macos-auditbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-filebeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-heartbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-metricbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-packetbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-auditbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-filebeat.yml | 31 +++++++++++++++++++ .../workflows/macos-xpack-functionbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-heartbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-metricbeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-osquerybeat.yml | 31 +++++++++++++++++++ .github/workflows/macos-xpack-packetbeat.yml | 31 +++++++++++++++++++ 12 files changed, 372 insertions(+) create mode 100644 .github/workflows/macos-auditbeat.yml create mode 100644 .github/workflows/macos-filebeat.yml create mode 100644 .github/workflows/macos-heartbeat.yml create mode 100644 .github/workflows/macos-metricbeat.yml create mode 100644 .github/workflows/macos-packetbeat.yml create mode 100644 .github/workflows/macos-xpack-auditbeat.yml create mode 100644 .github/workflows/macos-xpack-filebeat.yml create mode 100644 .github/workflows/macos-xpack-functionbeat.yml create mode 100644 .github/workflows/macos-xpack-heartbeat.yml create mode 100644 .github/workflows/macos-xpack-metricbeat.yml create mode 100644 .github/workflows/macos-xpack-osquerybeat.yml create mode 100644 .github/workflows/macos-xpack-packetbeat.yml diff --git a/.github/workflows/macos-auditbeat.yml 
b/.github/workflows/macos-auditbeat.yml new file mode 100644 index 00000000000..79c90d33895 --- /dev/null +++ b/.github/workflows/macos-auditbeat.yml @@ -0,0 +1,31 @@ +name: auditbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-auditbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'auditbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-filebeat.yml b/.github/workflows/macos-filebeat.yml new file mode 100644 index 00000000000..3b3b5e7e7f3 --- /dev/null +++ b/.github/workflows/macos-filebeat.yml @@ -0,0 +1,31 @@ +name: filebeat + +on: + pull_request: + paths: + - '.github/workflows/macos-filebeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'filebeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-heartbeat.yml b/.github/workflows/macos-heartbeat.yml new file mode 100644 index 00000000000..efb54b84e0c --- /dev/null +++ b/.github/workflows/macos-heartbeat.yml @@ -0,0 +1,31 @@ +name: heartbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-heartbeat.yml' + push: + branches: + - 
master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'heartbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-metricbeat.yml b/.github/workflows/macos-metricbeat.yml new file mode 100644 index 00000000000..828feaa76f3 --- /dev/null +++ b/.github/workflows/macos-metricbeat.yml @@ -0,0 +1,31 @@ +name: metricbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-metricbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'metricbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && echo "See https://github.com/elastic/beats/issues/29038" diff --git a/.github/workflows/macos-packetbeat.yml b/.github/workflows/macos-packetbeat.yml new file mode 100644 index 00000000000..41fafadaa5b --- /dev/null +++ b/.github/workflows/macos-packetbeat.yml @@ -0,0 +1,31 @@ +name: packetbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-packetbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'packetbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat 
.go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-auditbeat.yml b/.github/workflows/macos-xpack-auditbeat.yml new file mode 100644 index 00000000000..6cab024ccc0 --- /dev/null +++ b/.github/workflows/macos-xpack-auditbeat.yml @@ -0,0 +1,31 @@ +name: x-pack-auditbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-auditbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/auditbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-filebeat.yml b/.github/workflows/macos-xpack-filebeat.yml new file mode 100644 index 00000000000..8f52fd7938e --- /dev/null +++ b/.github/workflows/macos-xpack-filebeat.yml @@ -0,0 +1,31 @@ +name: x-pack-filebeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-filebeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/filebeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd 
${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-functionbeat.yml b/.github/workflows/macos-xpack-functionbeat.yml new file mode 100644 index 00000000000..5140ac6c2d9 --- /dev/null +++ b/.github/workflows/macos-xpack-functionbeat.yml @@ -0,0 +1,31 @@ +name: x-pack-functionbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-functionbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/functionbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-heartbeat.yml b/.github/workflows/macos-xpack-heartbeat.yml new file mode 100644 index 00000000000..73a7ca7a9da --- /dev/null +++ b/.github/workflows/macos-xpack-heartbeat.yml @@ -0,0 +1,31 @@ +name: x-pack-heartbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-heartbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/heartbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-metricbeat.yml 
b/.github/workflows/macos-xpack-metricbeat.yml new file mode 100644 index 00000000000..05121c82eb9 --- /dev/null +++ b/.github/workflows/macos-xpack-metricbeat.yml @@ -0,0 +1,31 @@ +name: x-pack-metricbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-metricbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/metricbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-osquerybeat.yml b/.github/workflows/macos-xpack-osquerybeat.yml new file mode 100644 index 00000000000..6f41b677131 --- /dev/null +++ b/.github/workflows/macos-xpack-osquerybeat.yml @@ -0,0 +1,31 @@ +name: x-pack-osquerybeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-osquerybeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/osquerybeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest diff --git a/.github/workflows/macos-xpack-packetbeat.yml b/.github/workflows/macos-xpack-packetbeat.yml new file mode 100644 index 00000000000..3e871ba689d --- /dev/null +++ b/.github/workflows/macos-xpack-packetbeat.yml @@ -0,0 +1,31 @@ +name: 
x-pack-packetbeat + +on: + pull_request: + paths: + - '.github/workflows/macos-xpack-packetbeat.yml' + push: + branches: + - master + - 7.1* + - 8.* + +env: + BEAT_MODULE: 'x-pack/packetbeat' + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v2 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go get -u github.com/magefile/mage + - name: Run build + run: cd ${{ env.BEAT_MODULE }} && mage build + - name: Run test + run: cd ${{ env.BEAT_MODULE }} && mage unitTest From cc0ad179fcd1ca28773419625558b4e4ca0301fd Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 13 Jan 2022 15:36:18 -0800 Subject: [PATCH 03/30] Add links to azure docs (#29829) (#29852) * Update azure.asciidoc Co-authored-by: Insuk (Chris) Cho (cherry picked from commit cbdba8aa1645a896023e387b370b5b88960f3176) Co-authored-by: DeDe Morton --- filebeat/docs/modules/azure.asciidoc | 8 ++++---- x-pack/filebeat/module/azure/_meta/docs.asciidoc | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/filebeat/docs/modules/azure.asciidoc b/filebeat/docs/modules/azure.asciidoc index 8997a206c03..92097e4cefa 100644 --- a/filebeat/docs/modules/azure.asciidoc +++ b/filebeat/docs/modules/azure.asciidoc @@ -22,16 +22,16 @@ There are several requirements before using the module since the logs will actua The module contains the following filesets: `activitylogs` :: -Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. +Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. 
To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log[Azure Activity log] documentation. `platformlogs` :: -Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. +Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/platform-logs-overview[Azure platform logs] documentation. `signinlogs` :: -Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. +Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins[Azure sign-in logs] documentation. `auditlogs` :: -Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. +Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-audit-logs[Azure audit logs] documentation. 
[float] === Module configuration diff --git a/x-pack/filebeat/module/azure/_meta/docs.asciidoc b/x-pack/filebeat/module/azure/_meta/docs.asciidoc index d8c52d2c4f1..9ed929f510d 100644 --- a/x-pack/filebeat/module/azure/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/azure/_meta/docs.asciidoc @@ -17,16 +17,16 @@ There are several requirements before using the module since the logs will actua The module contains the following filesets: `activitylogs` :: -Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. +Will retrieve azure activity logs. Control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/activity-log[Azure Activity log] documentation. `platformlogs` :: -Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. +Will retrieve azure platform logs. Platform logs provide detailed diagnostic and auditing information for Azure resources and the Azure platform they depend on. To learn more, refer to the https://docs.microsoft.com/en-us/azure/azure-monitor/essentials/platform-logs-overview[Azure platform logs] documentation. `signinlogs` :: -Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. +Will retrieve azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-sign-ins[Azure sign-in logs] documentation. 
`auditlogs` :: -Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. +Will retrieve azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD like adding or removing users, apps, groups, roles and policies. To learn more, refer to the https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/concept-audit-logs[Azure audit logs] documentation. [float] === Module configuration From c0d9dbf4d4828f0415b956ca675fdb815ec6b916 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 14 Jan 2022 02:16:07 -0500 Subject: [PATCH 04/30] [Automation] Update elastic stack version to 7.17.0-560490e9 for testing (#29857) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 96474da754a..7c3db75de44 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-9b8314f9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: docker.elastic.co/logstash/logstash-oss:7.17.0-9b8314f9-SNAPSHOT + image: 
docker.elastic.co/logstash/logstash-oss:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-9b8314f9-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 91d33be3d83..4a8ffcc1e59 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-9b8314f9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-9b8314f9-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-9b8314f9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.17.0-560490e9-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From dffe56cb07c69aaa6f8ceb9ef083242b477ab832 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 10:58:43 +0000 Subject: [PATCH 05/30] Add summary to journeys which don't emit journey:end (early node subprocess 
exits) (#29606) (#29812) * update link to beats developer guide * fix: add summary to journeys which don't emit journey:end [fixes #28770] * fix: avoid cmd/status when journey has already finished (cherry picked from commit 3270ae1ab631a156f8f7913ca3a794fe6bd80b2f) Co-authored-by: Lucas F. da Costa --- CHANGELOG.next.asciidoc | 1 + libbeat/README.md | 2 +- .../monitors/browser/synthexec/enrich.go | 41 ++++--- .../monitors/browser/synthexec/enrich_test.go | 113 ++++++++++++++++-- .../browser/synthexec/execmultiplexer.go | 2 +- .../browser/synthexec/execmultiplexer_test.go | 23 ++-- .../monitors/browser/synthexec/synthexec.go | 7 +- 7 files changed, 150 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 027fc151524..3ade1e986a7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -19,6 +19,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* - Only add monitor.status to browser events when summary. {pull}29460[29460] +- Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606] *Metricbeat* diff --git a/libbeat/README.md b/libbeat/README.md index cf99987c5a3..06a22219dfe 100644 --- a/libbeat/README.md +++ b/libbeat/README.md @@ -8,7 +8,7 @@ If you want to create a new project that reads some sort of operational data and ships it to Elasticsearch, we suggest you make use of this library. Please start by reading our [CONTRIBUTING](../CONTRIBUTING.md) file. We also have a [developer -guide](https://www.elastic.co/guide/en/beats/libbeat/current/new-beat.html) to +guide](https://www.elastic.co/guide/en/beats/devguide/master/index.html) to help you with the creation of new Beats. 
Please also open a topic on the [forums](https://discuss.elastic.co/c/beats/libbeat) and diff --git a/x-pack/heartbeat/monitors/browser/synthexec/enrich.go b/x-pack/heartbeat/monitors/browser/synthexec/enrich.go index c3ab6c76faa..bec551a7947 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/enrich.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/enrich.go @@ -112,6 +112,14 @@ func (je *journeyEnricher) enrichSynthEvent(event *beat.Event, se *SynthEvent) e } switch se.Type { + case "cmd/status": + // If a command failed _after_ the journey was complete, as it happens + // when an `afterAll` hook fails, for example, we don't wan't to include + // a summary in the cmd/status event. + if !je.journeyComplete { + je.end = event.Timestamp + return je.createSummary(event) + } case "journey/end": je.journeyComplete = true return je.createSummary(event) @@ -155,23 +163,24 @@ func (je *journeyEnricher) createSummary(event *beat.Event) error { down = 0 } - if je.journeyComplete { - eventext.MergeEventFields(event, common.MapStr{ - "url": je.urlFields, - "synthetics": common.MapStr{ - "type": "heartbeat/summary", - "journey": je.journey, - }, - "monitor": common.MapStr{ - "duration": common.MapStr{ - "us": int64(je.end.Sub(je.start) / time.Microsecond), - }, - }, - "summary": common.MapStr{ - "up": up, - "down": down, + eventext.MergeEventFields(event, common.MapStr{ + "url": je.urlFields, + "synthetics": common.MapStr{ + "type": "heartbeat/summary", + "journey": je.journey, + }, + "monitor": common.MapStr{ + "duration": common.MapStr{ + "us": int64(je.end.Sub(je.start) / time.Microsecond), }, - }) + }, + "summary": common.MapStr{ + "up": up, + "down": down, + }, + }) + + if je.journeyComplete { return je.firstError } diff --git a/x-pack/heartbeat/monitors/browser/synthexec/enrich_test.go b/x-pack/heartbeat/monitors/browser/synthexec/enrich_test.go index 629454f34c0..f2c8ba25dca 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/enrich_test.go +++ 
b/x-pack/heartbeat/monitors/browser/synthexec/enrich_test.go @@ -21,6 +21,18 @@ import ( "github.com/elastic/go-lookslike/testslike" ) +func makeStepEvent(typ string, ts float64, name string, index int, status string, urlstr string, err *SynthError) *SynthEvent { + return &SynthEvent{ + Type: typ, + TimestampEpochMicros: 1000 + ts, + PackageVersion: "1.0.0", + Step: &Step{Name: name, Index: index, Status: status}, + Error: err, + Payload: common.MapStr{}, + URL: urlstr, + } +} + func TestJourneyEnricher(t *testing.T) { journey := &Journey{ Name: "A Journey Name", @@ -50,17 +62,6 @@ func TestJourneyEnricher(t *testing.T) { Journey: journey, Payload: common.MapStr{}, } - makeStepEvent := func(typ string, ts float64, name string, index int, status string, urlstr string, err *SynthError) *SynthEvent { - return &SynthEvent{ - Type: typ, - TimestampEpochMicros: 1000 + ts, - PackageVersion: "1.0.0", - Step: &Step{Name: name, Index: index, Status: status}, - Error: err, - Payload: common.MapStr{}, - URL: urlstr, - } - } url1 := "http://example.net/url1" url2 := "http://example.net/url2" url3 := "http://example.net/url3" @@ -121,6 +122,24 @@ func TestEnrichSynthEvent(t *testing.T) { wantErr bool check func(t *testing.T, e *beat.Event, je *journeyEnricher) }{ + { + "cmd/status", + &journeyEnricher{}, + &SynthEvent{ + Type: "cmd/status", + Error: &SynthError{Name: "cmdexit", Message: "cmd err msg"}, + }, + true, + func(t *testing.T, e *beat.Event, je *journeyEnricher) { + v := lookslike.MustCompile(map[string]interface{}{ + "summary": map[string]int{ + "up": 0, + "down": 1, + }, + }) + testslike.Test(t, v, e.Fields) + }, + }, { "journey/end", &journeyEnricher{}, @@ -195,3 +214,75 @@ func TestEnrichSynthEvent(t *testing.T) { }) } } + +func TestNoSummaryOnAfterHook(t *testing.T) { + journey := &Journey{ + Name: "A journey that fails after completing", + Id: "my-bad-after-all-hook", + } + journeyStart := &SynthEvent{ + Type: "journey/start", + TimestampEpochMicros: 1000, + 
PackageVersion: "1.0.0", + Journey: journey, + Payload: common.MapStr{}, + } + syntherr := &SynthError{ + Message: "my-errmsg", + Name: "my-errname", + Stack: "my\nerr\nstack", + } + journeyEnd := &SynthEvent{ + Type: "journey/end", + TimestampEpochMicros: 2000, + PackageVersion: "1.0.0", + Journey: journey, + Payload: common.MapStr{}, + } + cmdStatus := &SynthEvent{ + Type: "cmd/status", + Error: &SynthError{Name: "cmdexit", Message: "cmd err msg"}, + TimestampEpochMicros: 3000, + } + + badStepUrl := "https://example.com/bad-step" + synthEvents := []*SynthEvent{ + journeyStart, + makeStepEvent("step/start", 10, "Step1", 1, "", "", nil), + makeStepEvent("step/end", 20, "Step1", 1, "failed", badStepUrl, syntherr), + journeyEnd, + cmdStatus, + } + + je := &journeyEnricher{} + + for idx, se := range synthEvents { + e := &beat.Event{} + t.Run(fmt.Sprintf("event %d", idx), func(t *testing.T) { + enrichErr := je.enrich(e, se) + + if se != nil && se.Type == "cmd/status" { + t.Run("no summary in cmd/status", func(t *testing.T) { + require.NotContains(t, e.Fields, "summary") + }) + } + + // Only the journey/end event should get a summary when + // it's emitted before the cmd/status (when an afterX hook fails). 
+ if se != nil && se.Type == "journey/end" { + require.Equal(t, stepError(syntherr), enrichErr) + + u, _ := url.Parse(badStepUrl) + t.Run("summary in journey/end", func(t *testing.T) { + v := lookslike.MustCompile(common.MapStr{ + "synthetics.type": "heartbeat/summary", + "url": wrappers.URLFields(u), + "monitor.duration.us": int64(journeyEnd.Timestamp().Sub(journeyStart.Timestamp()) / time.Microsecond), + }) + + testslike.Test(t, v, e.Fields) + }) + } + }) + } +} diff --git a/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer.go b/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer.go index 57b423626d1..07de0143c38 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer.go @@ -29,7 +29,7 @@ func (e ExecMultiplexer) writeSynthEvent(se *SynthEvent) { e.eventCounter.Store(-1) } hasCurrentJourney := e.currentJourney.Load() - if se.Type == "journey/end" { + if se.Type == "journey/end" || se.Type == "cmd/status" { e.currentJourney.Store(false) } diff --git a/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer_test.go b/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer_test.go index 56af4c94d9d..ec85a6b5222 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer_test.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/execmultiplexer_test.go @@ -18,7 +18,7 @@ func TestExecMultiplexer(t *testing.T) { var testJourneys []*Journey var testEvents []*SynthEvent time := float64(0) - for jIdx := 0; jIdx < 3; jIdx++ { + for jIdx := 0; jIdx < 4; jIdx++ { time++ // fake time to make events seem spaced out journey := &Journey{ Name: fmt.Sprintf("J%d", jIdx), @@ -45,11 +45,20 @@ func TestExecMultiplexer(t *testing.T) { }) } - testEvents = append(testEvents, &SynthEvent{ - Journey: journey, - Type: "journey/end", - TimestampEpochMicros: time, - }) + // We want one of the test journeys to end with a cmd/status indicating it failed + if jIdx != 4 { + 
testEvents = append(testEvents, &SynthEvent{ + Journey: journey, + Type: "journey/end", + TimestampEpochMicros: time, + }) + } else { + testEvents = append(testEvents, &SynthEvent{ + Journey: journey, + Type: "cmd/status", + TimestampEpochMicros: time, + }) + } } // Write the test events in another go routine since writes block @@ -77,7 +86,7 @@ Loop: i := 0 // counter for index, resets on journey change for _, se := range results { require.Equal(t, i, se.index) - if se.Type == "journey/end" { + if se.Type == "journey/end" || se.Type == "cmd/status" { i = 0 } else { i++ diff --git a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go index e5bcd1332f5..039a3480c80 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go @@ -196,8 +196,9 @@ func runCmd( if err != nil { str := fmt.Sprintf("command exited with status %d: %s", cmd.ProcessState.ExitCode(), err) mpx.writeSynthEvent(&SynthEvent{ - Type: "cmd/status", - Error: &SynthError{Name: "cmdexit", Message: str}, + Type: "cmd/status", + Error: &SynthError{Name: "cmdexit", Message: str}, + TimestampEpochMicros: float64(time.Now().UnixMicro()), }) logp.Warn("Error executing command '%s' (%d): %s", loggableCmd.String(), cmd.ProcessState.ExitCode(), err) } @@ -243,7 +244,7 @@ func lineToSynthEventFactory(typ string) func(bytes []byte, text string) (res *S logp.Info("%s: %s", typ, text) return &SynthEvent{ Type: typ, - TimestampEpochMicros: float64(time.Now().UnixNano() / int64(time.Millisecond)), + TimestampEpochMicros: float64(time.Now().UnixMicro()), Payload: map[string]interface{}{ "message": text, }, From 1310256c53d2aa55786c74e8e98a870385eb0114 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 05:10:33 -0700 Subject: [PATCH 06/30] Osquerybeat: Change osquerybeat packaging for windows, replace .MSI file with 
extracted osqueryd.exe during build process (#29801) (#29824) (cherry picked from commit e61a3656cd70e18099db4ef4d20397e0c237a959) Co-authored-by: Aleksandr Maus --- x-pack/osquerybeat/beater/install.go | 4 +- x-pack/osquerybeat/internal/distro/distro.go | 16 ++- x-pack/osquerybeat/magefile.go | 100 ++++++++++++++++++- x-pack/osquerybeat/scripts/mage/distro.go | 8 +- x-pack/osquerybeat/scripts/mage/package.go | 2 +- 5 files changed, 117 insertions(+), 13 deletions(-) diff --git a/x-pack/osquerybeat/beater/install.go b/x-pack/osquerybeat/beater/install.go index 7d25ba6b752..c299ed68454 100644 --- a/x-pack/osquerybeat/beater/install.go +++ b/x-pack/osquerybeat/beater/install.go @@ -34,9 +34,7 @@ func installOsqueryWithDir(ctx context.Context, dir string) error { fn := distro.OsquerydDistroFilename() var installFunc func(context.Context, string, string, bool) error - if runtime.GOOS == "windows" { - installFunc = install.InstallFromMSI - } else if runtime.GOOS == "darwin" { + if runtime.GOOS == "darwin" { installFunc = install.InstallFromPkg } diff --git a/x-pack/osquerybeat/internal/distro/distro.go b/x-pack/osquerybeat/internal/distro/distro.go index bcea584ece7..42b8da6a78d 100644 --- a/x-pack/osquerybeat/internal/distro/distro.go +++ b/x-pack/osquerybeat/internal/distro/distro.go @@ -57,19 +57,27 @@ func GetDataInstallDir(osarch OSArch) string { return filepath.Join(DataInstallDir, osarch.OS, osarch.Arch) } -func OsquerydFilename() string { - if runtime.GOOS == "windows" { +func OsquerydFilenameForOS(os string) string { + if os == "windows" { return osqueryDName + ".exe" } return osqueryDName } +func OsquerydFilename() string { + return OsquerydFilenameForOS(runtime.GOOS) +} + func OsquerydDarwinApp() string { return osqueryDarwinApp } +func OsquerydPathForOS(os, dir string) string { + return filepath.Join(dir, OsquerydFilenameForOS(os)) +} + func OsquerydPath(dir string) string { - return filepath.Join(dir, OsquerydFilename()) + return 
OsquerydPathForOS(runtime.GOOS, dir) } func OsquerydDarwinDistroPath() string { @@ -87,7 +95,7 @@ func OsquerydDistroFilename() string { func OsquerydDistroPlatformFilename(platform string) string { switch platform { case "windows": - return osqueryName + "-" + osqueryVersion + osqueryMSIExt + return OsquerydFilenameForOS(platform) case "darwin": return osqueryName + "-" + osqueryVersion + osqueryPkgExt } diff --git a/x-pack/osquerybeat/magefile.go b/x-pack/osquerybeat/magefile.go index 2d4e9a1b7db..070cf584125 100644 --- a/x-pack/osquerybeat/magefile.go +++ b/x-pack/osquerybeat/magefile.go @@ -8,15 +8,20 @@ package main import ( + "context" + "errors" "fmt" "os" "path/filepath" "runtime" + "strings" "time" "github.com/magefile/mage/mg" devtools "github.com/elastic/beats/v7/dev-tools/mage" + "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/command" + "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/distro" osquerybeat "github.com/elastic/beats/v7/x-pack/osquerybeat/scripts/mage" // mage:import @@ -77,10 +82,103 @@ func Clean() error { return devtools.Clean(paths) } +func extractFromMSI() error { + if os.Getenv("GOOS") != "windows" { + return nil + } + + ctx := context.Background() + + execCommand := func(name string, args ...string) error { + ps := strings.Join(append([]string{name}, args...), " ") + fmt.Println(ps) + output, err := command.Execute(ctx, name, args...) 
+ if err != nil { + fmt.Println(ps, ", failed: ", err) + return err + } + fmt.Print(output) + return err + } + + // Install msitools + err := execCommand("apt", "update") + if err != nil { + return err + } + + err = execCommand("apt", "install", "-y", "msitools") + if err != nil { + return err + } + + osArchs := osquerybeat.OSArchs(devtools.Platforms) + + for _, osarch := range osArchs { + if osarch.OS != "windows" { + continue + } + spec, err := distro.GetSpec(osarch) + if err != nil { + if errors.Is(err, distro.ErrUnsupportedOS) { + continue + } else { + return err + } + } + dip := distro.GetDataInstallDir(osarch) + msiFile := spec.DistroFilepath(dip) + + // MSI extract + err = execCommand("msiextract", "--directory", dip, msiFile) + if err != nil { + return err + } + + fmt.Println("copy osqueryd.exe from MSI") + dp := distro.OsquerydPathForOS(osarch.OS, dip) + err = devtools.Copy(filepath.Join(dip, "osquery", "osqueryd", "osqueryd.exe"), dp) + if err != nil { + fmt.Println("copy osqueryd.exe from MSI failed: ", err) + return err + } + // Chmod set to the same as other executables in the final package + if err = os.Chmod(dp, 0755); err != nil { + return err + } + } + + return nil +} + // GolangCrossBuild build the Beat binary inside of the golang-builder. // Do not use directly, use crossBuild instead. func GolangCrossBuild() error { - return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) + // This is to fix a defect in the field where msiexec fails to extract the osqueryd.exe + // from bundled osquery.msi, with error code 1603 + // https://docs.microsoft.com/en-us/troubleshoot/windows-server/application-management/msi-installation-error-1603 + // SDH: https://github.com/elastic/sdh-beats/issues/1575 + // Currently we can't reproduce this is issue, but here we can eliminate the need for calling msiexec + // if extract the osqueryd.exe binary during the build. 
+ // + // The builder docker images are Debian so we need to install msitools for + // linux in order to extract the osqueryd.exe from MSI during build process. // Install MSI tools in order to extract file from MSI + // Ideally we would want these to be a part of the build docker image, + // but doing this here for now due to limited time before 7.16.2 + // + // The cross build is currently called for two binaries osquerybeat and osqquery-extension + // Only install msitools and extract osqueryd.exe during osquerybeat build on windows + args := devtools.DefaultGolangCrossBuildArgs() + + // Install msitools only + if !strings.HasPrefix(args.Name, "osquery-extension-") { + // Install msitools in the container and extract osqueryd.exe from MSI + if err := extractFromMSI(); err != nil { + return err + } + } + + return devtools.GolangCrossBuild(args) } // BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). diff --git a/x-pack/osquerybeat/scripts/mage/distro.go b/x-pack/osquerybeat/scripts/mage/distro.go index 1be99ae3f6d..87fab857bb9 100644 --- a/x-pack/osquerybeat/scripts/mage/distro.go +++ b/x-pack/osquerybeat/scripts/mage/distro.go @@ -24,20 +24,20 @@ import ( // FetchOsqueryDistros fetches Osquery official distros as a part of the build func FetchOsqueryDistros() error { - osArchs := osArchs(devtools.Platforms) + osArchs := OSArchs(devtools.Platforms) log.Printf("Fetch Osquery distros for %v", osArchs) for _, osarch := range osArchs { spec, err := distro.GetSpec(osarch) if err != nil { if errors.Is(err, distro.ErrUnsupportedOS) { - log.Printf("The build spec %v is not supported, continue", spec) + log.Printf("The build spec %v is not supported, continue\n", spec) continue } else { return err } } - log.Print("Found spec:", spec) + log.Println("Found spec:", spec) fetched, err := checkCacheAndFetch(osarch, spec) if err != nil { @@ -66,7 +66,7 @@ func FetchOsqueryDistros() error { return nil } -func osArchs(platforms devtools.BuildPlatformList) 
[]distro.OSArch { +func OSArchs(platforms devtools.BuildPlatformList) []distro.OSArch { mp := make(map[distro.OSArch]struct{}) for _, platform := range platforms { diff --git a/x-pack/osquerybeat/scripts/mage/package.go b/x-pack/osquerybeat/scripts/mage/package.go index 67ddf18568a..5f003942b11 100644 --- a/x-pack/osquerybeat/scripts/mage/package.go +++ b/x-pack/osquerybeat/scripts/mage/package.go @@ -23,7 +23,7 @@ func CustomizePackaging() { // TODO: this could be moved to dev-tools/packaging/packages.yml for the next release var mode os.FileMode = 0644 // If distFile is osqueryd binary then it should be executable - if distFile == distro.OsquerydFilename() { + if distFile == distro.OsquerydFilenameForOS(args.OS) { mode = 0750 } arch := defaultArch From d596a95bebc3a1481b8e8becf0b0047add3e6dde Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 10:11:47 -0500 Subject: [PATCH 07/30] [7.17](backport #29780) Replace location of apoydence/eachers to poy/eachers (#29838) Co-authored-by: Pier-Hugues Pellerin --- NOTICE.txt | 4 ++-- go.mod | 1 + go.sum | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index bc566be3834..e402d911d5f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -22883,12 +22883,12 @@ For t_cl_generator.cc -------------------------------------------------------------------------------- -Dependency : github.com/apoydence/eachers +Dependency : github.com/poy/eachers Version: v0.0.0-20181020210610-23942921fe77 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/apoydence/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: +Contents of probable licence file $GOMODCACHE/github.com/poy/eachers@v0.0.0-20181020210610-23942921fe77/LICENSE.md: The MIT License (MIT) diff --git a/go.mod b/go.mod index e1feba34564..0589e817ea5 100644 --- 
a/go.mod +++ b/go.mod @@ -288,6 +288,7 @@ require ( replace ( github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20210823122811-11c3ef800752 + github.com/apoydence/eachers => github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 //indirect, see https://github.com/elastic/beats/pull/29780 for details. github.com/cucumber/godog => github.com/cucumber/godog v0.8.1 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 github.com/docker/go-plugins-helpers => github.com/elastic/go-plugins-helpers v0.0.0-20200207104224-bdf17607b79f diff --git a/go.sum b/go.sum index da9cea8d338..0590203214b 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,6 @@ github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d h1:OE3kzLBpy7pOJEzE55 github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f h1:33BV5v3u8I6dA2dEoPuXWCsAaHHOJfPtdxZhAMQV4uo= github.com/apache/thrift v0.13.1-0.20200603211036-eac4d0c79a5f/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM= -github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -958,6 +956,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= From 7b0c1df785abe6d3877daee9385232b99bfc77fd Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 10:43:21 -0600 Subject: [PATCH 08/30] Add note on docker container as a requirement for browser monitors (#29794) (#29856) * Add note on docker container as a requirement for browser monitors We mention that you need the container in the synthetics guide, but not on this page, this is a bit cleaner. * Update monitor-browser.asciidoc * More * Update heartbeat/docs/monitors/monitor-browser.asciidoc Co-authored-by: EamonnTP Co-authored-by: EamonnTP (cherry picked from commit 09c3abf1df942428337499f6b7ebb8136d02ada6) Co-authored-by: Andrew Cholakian --- heartbeat/docs/monitors/monitor-browser.asciidoc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-browser.asciidoc b/heartbeat/docs/monitors/monitor-browser.asciidoc index 49f31668d47..7c35bb7b6d7 100644 --- a/heartbeat/docs/monitors/monitor-browser.asciidoc +++ b/heartbeat/docs/monitors/monitor-browser.asciidoc @@ -6,7 +6,12 @@ See the {observability-guide}/synthetics-quickstart.html[quick start guide]. beta[] The options described here configure {beatname_uc} to run the synthetic monitoring test suites via Synthetic Agent on the Chromium browser. 
-Additional shared options are defined in <>. +Additional shared options are defined in <>. + +Browser based monitors can only be run in our {beatname_uc} docker image, +or via the `elastic-agent-complete` docker image. +For more information, see {observability-guide}/synthetics-quickstart.html[Synthetic monitoring using Docker]. + Example configuration: [source,yaml] From 6c305d98e24f9d3fb8fdd02fe3cd39a82099ec06 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 15:15:08 -0600 Subject: [PATCH 09/30] [Heartbeat] Use timer.reset now that golang has been updated (#29729) (#29733) Undoes https://github.com/elastic/beats/pull/27006/files (while preserving the new test), and also cleaning up the syntax using `time.Until`. Since the The golang bug I reported in https://github.com/golang/go/issues/47329 has been fixed since somewhere in go 1.16.x (it's hard to track the exact version). This should be backported to 7.16.x since that already uses go 1.17.x and is safe. 
(cherry picked from commit 7b095dae991763ab612d673f85fcbec920b2fb53) Co-authored-by: Andrew Cholakian --- heartbeat/scheduler/timerqueue/queue.go | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/heartbeat/scheduler/timerqueue/queue.go b/heartbeat/scheduler/timerqueue/queue.go index 61d4e0ac933..760dd353f38 100644 --- a/heartbeat/scheduler/timerqueue/queue.go +++ b/heartbeat/scheduler/timerqueue/queue.go @@ -88,7 +88,7 @@ func (tq *TimerQueue) Start() { if tq.th.Len() > 0 { nr := tq.th[0].runAt tq.nextRunAt = &nr - tq.timer = time.NewTimer(time.Until(nr)) + tq.timer.Reset(time.Until(nr)) } else { tq.timer.Stop() tq.nextRunAt = nil @@ -107,18 +107,7 @@ func (tq *TimerQueue) pushInternal(tt *timerTask) { if tq.nextRunAt != nil && !tq.timer.Stop() { <-tq.timer.C } - // Originally the line below this comment was - // - // tq.timer.Reset(time.Until(tt.runAt)) - // - // however this broke in go1.16rc1, specifically on the commit b4b014465216790e01aa66f9120d03230e4aff46 - //, specifically on this line: - // https://github.com/golang/go/commit/b4b014465216790e01aa66f9120d03230e4aff46#diff-73699b6edfe5dbb3f6824e66bb3566bce9405e9a8c810cac55c8199459f0ac19R652 - // where some nice new optimizations don't actually work reliably - // This can be worked around by instantiating a new timer rather than resetting the timer. 
- // since that internally calls deltimer in runtime/timer.go rather than modtimer, - // I suspect that the problem is in modtimer's setting of &pp.timerModifiedEarliest - tq.timer = time.NewTimer(time.Until(tt.runAt)) + tq.timer.Reset(time.Until(tt.runAt)) tq.nextRunAt = &tt.runAt } } From 362ad39f500d688b720788dedaa2b2bb5b812f24 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 17 Jan 2022 11:35:19 +0100 Subject: [PATCH 10/30] [7.17](backport #29676) Remove overriding of index pattern on the Kubernetes overview dashboard (#29792) * Remove overriding of index pattern on the Kubernetes overview dashboard (#29676) * Remove overriding of index pattern on the Kubernetes overview dashboard; align visualisations with changes in https://github.com/elastic/integrations/pull/2151 Signed-off-by: Tetiana Kravchenko * add PR number in changelog Signed-off-by: Tetiana Kravchenko (cherry picked from commit 896b7e481c0e8046d20b02f253a4f7bcf9c73494) * Update CHANGELOG.next.asciidoc Co-authored-by: Tetiana Kravchenko --- CHANGELOG.next.asciidoc | 2 ++ .../174a6ad0-30e0-11e7-8df8-6d3604a72912-ecs.json | 3 +-- .../da1ff7c0-30ed-11e7-b9e5-2b5b07213ab3-ecs.json | 5 +---- .../e1018b90-2bfb-11e7-859b-f78b612cde28-ecs.json | 4 +--- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3ade1e986a7..88562b2df6e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -23,6 +23,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Metricbeat* +- Remove overriding of index pattern on the Kubernetes overview dashboard. 
{pull}29676[29676] + *Packetbeat* *Winlogbeat* diff --git a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/174a6ad0-30e0-11e7-8df8-6d3604a72912-ecs.json b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/174a6ad0-30e0-11e7-8df8-6d3604a72912-ecs.json index 12a7a5a03b6..530eda35e08 100644 --- a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/174a6ad0-30e0-11e7-8df8-6d3604a72912-ecs.json +++ b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/174a6ad0-30e0-11e7-8df8-6d3604a72912-ecs.json @@ -57,7 +57,6 @@ "type": "sum" } ], - "override_index_pattern": 1, "point_size": 1, "seperate_axis": 0, "series_interval": "10s", @@ -84,4 +83,4 @@ "type": "visualization", "updated_at": "2021-08-04T16:31:37.319Z", "version": "WzQwMjMsMV0=" -} \ No newline at end of file +} diff --git a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/da1ff7c0-30ed-11e7-b9e5-2b5b07213ab3-ecs.json b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/da1ff7c0-30ed-11e7-b9e5-2b5b07213ab3-ecs.json index a72257a0686..5d3875f54da 100644 --- a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/da1ff7c0-30ed-11e7-b9e5-2b5b07213ab3-ecs.json +++ b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/da1ff7c0-30ed-11e7-b9e5-2b5b07213ab3-ecs.json @@ -57,12 +57,9 @@ "type": "sum" } ], - "override_index_pattern": 1, "point_size": 1, "seperate_axis": 0, - "series_index_pattern": "*", "series_interval": "10s", - "series_time_field": "@timestamp", "split_color_mode": "gradient", "split_mode": "everything", "stacked": "none" @@ -86,4 +83,4 @@ "type": "visualization", "updated_at": "2021-08-04T16:31:37.319Z", "version": "WzQwMjUsMV0=" -} \ No newline at end of file +} diff --git a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/e1018b90-2bfb-11e7-859b-f78b612cde28-ecs.json b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/e1018b90-2bfb-11e7-859b-f78b612cde28-ecs.json index 3c3492288fb..c352ee06e79 100644 --- 
a/metricbeat/module/kubernetes/_meta/kibana/7/visualization/e1018b90-2bfb-11e7-859b-f78b612cde28-ecs.json +++ b/metricbeat/module/kubernetes/_meta/kibana/7/visualization/e1018b90-2bfb-11e7-859b-f78b612cde28-ecs.json @@ -57,11 +57,9 @@ "type": "sum" } ], - "override_index_pattern": 1, "point_size": 1, "seperate_axis": 0, "series_interval": "10s", - "series_time_field": "@timestamp", "split_color_mode": "gradient", "split_mode": "everything", "stacked": "none" @@ -85,4 +83,4 @@ "type": "visualization", "updated_at": "2021-08-04T16:31:37.319Z", "version": "WzQwMTYsMV0=" -} \ No newline at end of file +} From 4c4400e2300faf5dcf3fb1b46b6eb2c499c9f866 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 17 Jan 2022 17:29:42 +0100 Subject: [PATCH 11/30] Update Index template loading guide to use the correct endpoint (#29869) (#29877) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR updates the documentation for loading index templates manually. The endpoint used in the documentation was outdated. That lead to some confusion. 
(cherry picked from commit 055798ae3561da8cbb4749984d1ff57964fbee2f) Co-authored-by: Noémi Ványi --- libbeat/docs/howto/load-index-templates.asciidoc | 8 ++++---- libbeat/docs/upgrading.asciidoc | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libbeat/docs/howto/load-index-templates.asciidoc b/libbeat/docs/howto/load-index-templates.asciidoc index 200f81548a0..3f55e117f94 100644 --- a/libbeat/docs/howto/load-index-templates.asciidoc +++ b/libbeat/docs/howto/load-index-templates.asciidoc @@ -294,7 +294,7 @@ ifdef::deb_os,rpm_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::deb_os,rpm_os[] @@ -303,7 +303,7 @@ ifdef::mac_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::mac_os[] @@ -312,7 +312,7 @@ ifdef::linux_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_index_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::linux_os[] @@ -323,6 +323,6 @@ endif::win_only[] ["source","sh",subs="attributes"] ---- -PS > Invoke-RestMethod -Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}-{version} +PS > Invoke-RestMethod -Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri 
http://localhost:9200/_index_template/{beatname_lc}-{version} ---- endif::win_os[] diff --git a/libbeat/docs/upgrading.asciidoc b/libbeat/docs/upgrading.asciidoc index b0370a77812..6d2ae6478f7 100644 --- a/libbeat/docs/upgrading.asciidoc +++ b/libbeat/docs/upgrading.asciidoc @@ -330,7 +330,7 @@ layer. See <>. + ["source","sh",subs="attributes"] ---- -DELETE /_template/metricbeat-{version} +DELETE /_index_template/metricbeat-{version} ---- + Because the index template was loaded without the compatibility layer enabled, From 84bf434017a88c9d1a1860d711bfcfa4abe3c5fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 18 Jan 2022 11:39:59 +0100 Subject: [PATCH 12/30] Enable require_alias for Bulk requests for all actions when target is a write alias (#29879) ## What does this PR do? This PR adds support for requiring alias when using ILM. From now on a `Selector` can tell Elasticsearch client if the target we are shipping events to is an alias or an index. By default, we consider everything an index, and only consider a target an alias when ILM is enabled. The feature is only supported since ES 7.10, so if the user tries to connect to an older version, we cannot help them with this parameter. ## Why is it important? We see issues around ILM sometimes where users have deleted their write alias causing running beats instances to auto-create an index where the write alias should (with auto-mappings to boot, since the template won't be applied). 
--- CHANGELOG.next.asciidoc | 2 + libbeat/esleg/eslegclient/bulkapi.go | 9 +-- libbeat/idxmgmt/std.go | 4 ++ libbeat/outputs/elasticsearch/client.go | 8 +++ .../elasticsearch/client_integration_test.go | 7 ++- libbeat/outputs/elasticsearch/client_test.go | 60 ++++++++++++++----- .../elasticsearch/death_letter_selector.go | 2 + libbeat/outputs/outil/select.go | 2 + libbeat/outputs/output_reg.go | 2 + 9 files changed, 77 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 88562b2df6e..8eeac98795f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -36,6 +36,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* +- Enable `require_alias` for Bulk requests for all actions when target is a write alias. {issue}27874[27874] {pull}29879[29879] + *Auditbeat* diff --git a/libbeat/esleg/eslegclient/bulkapi.go b/libbeat/esleg/eslegclient/bulkapi.go index 70118d57fd5..44027a837d3 100644 --- a/libbeat/esleg/eslegclient/bulkapi.go +++ b/libbeat/esleg/eslegclient/bulkapi.go @@ -51,10 +51,11 @@ type BulkDeleteAction struct { } type BulkMeta struct { - Index string `json:"_index" struct:"_index"` - DocType string `json:"_type,omitempty" struct:"_type,omitempty"` - Pipeline string `json:"pipeline,omitempty" struct:"pipeline,omitempty"` - ID string `json:"_id,omitempty" struct:"_id,omitempty"` + Index string `json:"_index" struct:"_index"` + DocType string `json:"_type,omitempty" struct:"_type,omitempty"` + Pipeline string `json:"pipeline,omitempty" struct:"pipeline,omitempty"` + ID string `json:"_id,omitempty" struct:"_id,omitempty"` + RequireAlias bool `json:"require_alias,omitempty" struct:"require_alias,omitempty"` } type bulkRequest struct { diff --git a/libbeat/idxmgmt/std.go b/libbeat/idxmgmt/std.go index b1f0071ade2..d8f032c4f42 100644 --- a/libbeat/idxmgmt/std.go +++ b/libbeat/idxmgmt/std.go @@ -337,6 +337,8 @@ func (s *ilmIndexSelector) Select(evt 
*beat.Event) (string, error) { return idx, err } +func (s ilmIndexSelector) IsAlias() bool { return true } + func (s indexSelector) Select(evt *beat.Event) (string, error) { if idx := getEventCustomIndex(evt, s.beatInfo); idx != "" { return idx, nil @@ -344,6 +346,8 @@ func (s indexSelector) Select(evt *beat.Event) (string, error) { return s.sel.Select(evt) } +func (s indexSelector) IsAlias() bool { return false } + func getEventCustomIndex(evt *beat.Event, beatInfo beat.Info) string { if len(evt.Meta) == 0 { return "" diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index deab29c3dcd..3900bdeb479 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -317,6 +317,10 @@ func (client *Client) createEventBulkMeta(version common.Version, event *beat.Ev ID: id, } + if isRequireAliasSupported(version) { + meta.RequireAlias = client.index.IsAlias() + } + if opType == events.OpTypeDelete { if id != "" { return eslegclient.BulkDeleteAction{Delete: meta}, nil @@ -333,6 +337,10 @@ func (client *Client) createEventBulkMeta(version common.Version, event *beat.Ev return eslegclient.BulkIndexAction{Index: meta}, nil } +func isRequireAliasSupported(version common.Version) bool { + return !version.LessThan(common.MustNewVersion("7.10.0")) +} + func (client *Client) getPipeline(event *beat.Event) (string, error) { if event.Meta != nil { pipeline, err := events.GetMetaStringValue(*event, events.FieldMetaPipeline) diff --git a/libbeat/outputs/elasticsearch/client_integration_test.go b/libbeat/outputs/elasticsearch/client_integration_test.go index 2b05f8a3cdb..1bf5d99ce29 100644 --- a/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/libbeat/outputs/elasticsearch/client_integration_test.go @@ -420,7 +420,8 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu } info := beat.Info{Beat: "libbeat"} - im, _ := idxmgmt.DefaultSupport(nil, info, nil) + // 
ILM must be disabled otherwise custom index settings are ignored. + im, _ := idxmgmt.DefaultSupport(nil, info, disabledILMConfig()) output, err := makeES(im, info, stats, config) if err != nil { t.Fatal(err) @@ -438,6 +439,10 @@ func connectTestEs(t *testing.T, cfg interface{}, stats outputs.Observer) (outpu return client, client } +func disabledILMConfig() *common.Config { + return common.MustNewConfigFrom(map[string]interface{}{"setup": map[string]interface{}{"ilm": map[string]interface{}{"enabled": false}}}) +} + // setupRoleMapping sets up role mapping for the Kerberos user beats@ELASTIC func setupRoleMapping(t *testing.T, host string) error { _, client := connectTestEsWithoutStats(t, map[string]interface{}{ diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 2a03d10481d..9cdd43dca08 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -409,22 +409,49 @@ func TestClientWithHeaders(t *testing.T) { func TestBulkEncodeEvents(t *testing.T) { cases := map[string]struct { - version string - docType string - config common.MapStr - events []common.MapStr + version string + docType string + config common.MapStr + ilmConfig *common.Config + isAlias bool + events []common.MapStr }{ "6.x": { - version: "6.8.0", - docType: "doc", - config: common.MapStr{}, - events: []common.MapStr{{"message": "test"}}, + version: "6.8.0", + docType: "doc", + config: common.MapStr{}, + ilmConfig: common.NewConfig(), + events: []common.MapStr{{"message": "test"}}, }, - "latest": { - version: version.GetDefaultVersion(), - docType: "", - config: common.MapStr{}, - events: []common.MapStr{{"message": "test"}}, + "require_alias not supported": { + version: "7.9.0", + docType: "", + config: common.MapStr{}, + ilmConfig: common.NewConfig(), + events: []common.MapStr{{"message": "test"}}, + }, + "require_alias is supported": { + version: "7.10.0", + docType: "", + config: 
common.MapStr{}, + ilmConfig: common.NewConfig(), + isAlias: true, + events: []common.MapStr{{"message": "test"}}, + }, + "latest with ILM": { + version: version.GetDefaultVersion(), + docType: "", + config: common.MapStr{}, + ilmConfig: common.NewConfig(), + isAlias: true, + events: []common.MapStr{{"message": "test"}}, + }, + "latest without ILM": { + version: version.GetDefaultVersion(), + docType: "", + config: common.MapStr{}, + ilmConfig: disabledILMConfig(), + events: []common.MapStr{{"message": "test"}}, }, } @@ -437,7 +464,7 @@ func TestBulkEncodeEvents(t *testing.T) { Version: test.version, } - im, err := idxmgmt.DefaultSupport(nil, info, common.NewConfig()) + im, err := idxmgmt.DefaultSupport(nil, info, test.ilmConfig) require.NoError(t, err) index, pipeline, err := buildSelectors(im, info, cfg) @@ -479,6 +506,7 @@ func TestBulkEncodeEvents(t *testing.T) { } assert.NotEqual(t, "", meta.Index) + assert.Equal(t, test.isAlias, meta.RequireAlias) assert.Equal(t, test.docType, meta.DocType) } @@ -487,6 +515,10 @@ func TestBulkEncodeEvents(t *testing.T) { } } +func disabledILMConfig() *common.Config { + return common.MustNewConfigFrom(map[string]interface{}{"setup": map[string]interface{}{"ilm": map[string]interface{}{"enabled": false}}}) +} + func TestBulkEncodeEventsWithOpType(t *testing.T) { cases := []common.MapStr{ {"_id": "111", "op_type": e.OpTypeIndex, "message": "test 1", "bulkIndex": 0}, diff --git a/libbeat/outputs/elasticsearch/death_letter_selector.go b/libbeat/outputs/elasticsearch/death_letter_selector.go index 02bd3780cab..34184c80c1a 100644 --- a/libbeat/outputs/elasticsearch/death_letter_selector.go +++ b/libbeat/outputs/elasticsearch/death_letter_selector.go @@ -34,3 +34,5 @@ func (d DeadLetterSelector) Select(event *beat.Event) (string, error) { } return d.Selector.Select(event) } + +func (d DeadLetterSelector) IsAlias() bool { return false } diff --git a/libbeat/outputs/outil/select.go b/libbeat/outputs/outil/select.go index 
1615a3bdb11..ebe55674e4a 100644 --- a/libbeat/outputs/outil/select.go +++ b/libbeat/outputs/outil/select.go @@ -87,6 +87,8 @@ func (s Selector) Select(evt *beat.Event) (string, error) { return s.sel.sel(evt) } +func (s Selector) IsAlias() bool { return false } + // IsEmpty checks if the selector is not configured and will always return an empty string. func (s Selector) IsEmpty() bool { return s.sel == nilSelector || s.sel == nil diff --git a/libbeat/outputs/output_reg.go b/libbeat/outputs/output_reg.go index 86c1323c505..f4abc63298a 100644 --- a/libbeat/outputs/output_reg.go +++ b/libbeat/outputs/output_reg.go @@ -43,8 +43,10 @@ type IndexManager interface { } // IndexSelector is used to find the index name an event shall be indexed to. +// It is also used to check if during indexing require_alias should be set. type IndexSelector interface { Select(event *beat.Event) (string, error) + IsAlias() bool } // Group configures and combines multiple clients into load-balanced group of clients From 87cc0f95eda69d0f12a9aff2a842669b6ce0c500 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 12:25:50 +0100 Subject: [PATCH 13/30] Fix Filebeat dissect processor field tokenization in documentation (#29680) (#29883) Signed-off-by: inge4pres (cherry picked from commit e1ca29dc7f2d64ef72829f886f09fc3114e76f26) Co-authored-by: Francesco Gualazzi --- libbeat/processors/dissect/docs/dissect.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libbeat/processors/dissect/docs/dissect.asciidoc b/libbeat/processors/dissect/docs/dissect.asciidoc index b3dcf240c7e..a7a68a45c12 100644 --- a/libbeat/processors/dissect/docs/dissect.asciidoc +++ b/libbeat/processors/dissect/docs/dissect.asciidoc @@ -74,14 +74,14 @@ For this example, imagine that an application generates the following messages: "789 - App02 - Database is refreshing tables" ---- -Use the `dissect` processor to split each message into 
two fields, for example, +Use the `dissect` processor to split each message into three fields, for example, `service.pid`, `service.name` and `service.status`: [source,yaml] ---- processors: - dissect: - tokenizer: '"%{pid|integer} - %{service.name} - %{service.status}"' + tokenizer: '"%{service.pid|integer} - %{service.name} - %{service.status}"' field: "message" target_prefix: "" ---- @@ -98,7 +98,7 @@ This configuration produces fields like: ---- `service.name` is an ECS {ref}/keyword.html[keyword field], which means that you -can use it in {es} for filtering, sorting, and aggregations. +can use it in {es} for filtering, sorting, and aggregations. When possible, use ECS-compatible field names. For more information, see the {ecs-ref}/index.html[Elastic Common Schema] documentation. From 89fd50e74a1d01ab32d7d717a1c92125d83708dd Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 18 Jan 2022 10:12:12 -0500 Subject: [PATCH 14/30] [Automation] Update elastic stack version to 7.17.0-079761a0 for testing (#29864) Co-authored-by: apmmachine Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 7c3db75de44..e775e69d34e 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: 
docker.elastic.co/logstash/logstash-oss:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4a8ffcc1e59..15d2db2a94e 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-560490e9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.17.0-079761a0-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 5409d1927412b0e28c7c64d9c00bc4dccfd9aa75 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 10:50:50 -0500 Subject: [PATCH 15/30] 
Fix YAML indentation in `parsers` examples (#29663) (#29894) See discussion on https://discuss.elastic.co/t/filebeat-filestream-input-parsers-multiline-fails/290543/9. (cherry picked from commit 9e0dad7bdd06ffa957f0c2492be1a6a6916267fe) Co-authored-by: Steve Mokris --- .../input-filestream-reader-options.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/filebeat/docs/inputs/input-filestream-reader-options.asciidoc b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc index b2c0fa2fb70..2624928b154 100644 --- a/filebeat/docs/inputs/input-filestream-reader-options.asciidoc +++ b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc @@ -164,11 +164,11 @@ The multiline message is stored under the key `msg`. ... parsers: - ndjson: - keys_under_root: true - message_key: msg + keys_under_root: true + message_key: msg - multiline: - type: counter - lines_count: 3 + type: counter + lines_count: 3 ---- See the available parser settings in detail below. @@ -197,9 +197,9 @@ Example configuration: [source,yaml] ---- - ndjson: - keys_under_root: true - add_error_key: true - message_key: log + keys_under_root: true + add_error_key: true + message_key: log ---- *`keys_under_root`*:: By default, the decoded JSON is placed under a "json" key @@ -256,5 +256,5 @@ all containers under the default Kubernetes logs path: - "/var/log/containers/*.log" parsers: - container: - stream: stdout + stream: stdout ---- From 5d841312f81bb2a16e03a2feb7e2508718680aaa Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 11:19:58 -0500 Subject: [PATCH 16/30] [7.17](backport #29681) Change docker image from CentOS 7 to Ubuntu 20.04 (#29817) * Change docker image from CentOS 7 to Ubuntu 20.04 (#29681) * Switch to Ubuntu 20.04. * Fix Dockerfile. * Change to amd64 in Dockerfile.tmpl. * Add missing curl and ca-certificate deps. Change back to x86_64. 
* Fix issues with setcap and xz extraction. * Add changelog, fix remaining issues. * Fix synthetics deps * Fix apt-get. Remove todo from packages.yml. Co-authored-by: Andrew Cholakian (cherry picked from commit febc7ddbfa4b8c7f39fb8a05b8d90864f0199e93) # Conflicts: # dev-tools/packaging/packages.yml # dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl # metricbeat/Dockerfile * Fix merge conflicts. * Fix issue in Dockerfile.elastic-agent.tmpl. Co-authored-by: Blake Rouse --- .ci/packer_cache.sh | 2 +- CHANGELOG.next.asciidoc | 1 + auditbeat/Dockerfile | 2 +- dev-tools/packaging/packages.yml | 16 ++--- .../docker/Dockerfile.elastic-agent.tmpl | 67 ++++++++++++------- .../templates/docker/Dockerfile.tmpl | 67 +++++++++++++------ filebeat/Dockerfile | 2 +- heartbeat/Dockerfile | 2 +- libbeat/Dockerfile | 2 +- metricbeat/Dockerfile | 4 +- packetbeat/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/libbeat/Dockerfile | 2 +- 13 files changed, 106 insertions(+), 65 deletions(-) diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 1627343c612..9b444376924 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -21,7 +21,7 @@ function dockerPullCommonImages() { docker.elastic.co/observability-ci/database-enterprise:12.2.0.1 docker.elastic.co/beats-dev/fpm:1.11.0 golang:1.14.12-stretch - centos:7 + ubuntu:20.04 " for image in ${DOCKER_IMAGES} ; do (retry 2 docker pull ${image}) || echo "Error pulling ${image} Docker image. Continuing." 
diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 8eeac98795f..5f35daf8fd7 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -11,6 +11,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* - Add job.name in pods controlled by Jobs {pull}28954[28954] +- Change Docker base image from CentOS 7 to Ubuntu 20.04 {pull}29681[29681] *Auditbeat* diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index 061539b6dcf..08cff3f06a7 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3 \ python3-pip \ python3-venv \ diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index f12004a64cb..8bd34bd3853 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -476,8 +476,8 @@ shared: - &agent_docker_spec <<: *agent_binary_spec extra_vars: - from: 'centos:7' - buildFrom: 'centos:7' + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' dockerfile: 'Dockerfile.elastic-agent.tmpl' docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' user: '{{ .BeatName }}' @@ -496,8 +496,8 @@ shared: - &agent_docker_arm_spec <<: *agent_docker_spec extra_vars: - from: 'arm64v8/centos:7' - buildFrom: 'arm64v8/centos:7' + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' - &agent_docker_complete_spec <<: *agent_docker_spec @@ -648,8 +648,8 @@ shared: - &docker_spec <<: *binary_spec extra_vars: - from: 'centos:7' - buildFrom: 'centos:7' + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' user: '{{ .BeatName }}' linux_capabilities: '' files: @@ -661,8 +661,8 @@ shared: - &docker_arm_spec <<: *docker_spec extra_vars: - from: 'arm64v8/centos:7' - buildFrom: 'arm64v8/centos:7' + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' 
- &docker_ubi_spec extra_vars: diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index ce4503c39cf..8958601cfe9 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -22,43 +22,50 @@ RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_s {{- end }} true -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. Thus, we use readlink to follow the link and setcap on the actual binary -RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} -{{- end }} - FROM {{ .from }} ENV BEAT_SETUID_AS={{ .user }} {{- if contains .from "ubi-minimal" }} -RUN for iter in {1..10}; do microdnf update -y && microdnf install -y shadow-utils jq && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) +RUN for iter in {1..10}; do microdnf update -y && microdnf install -y findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) {{- else }} -# Installing jq needs to be installed after epel-release and cannot be in the same yum install command. -RUN case $(arch) in aarch64) YUM_FLAGS="-x bind-license";; esac; \ - for iter in {1..10}; do \ - yum update -y $YUM_FLAGS && \ - yum install -y epel-release && \ - yum update -y $YUM_FLAGS && \ - yum install -y jq && \ - - yum clean all && \ - exit_code=0 && break || exit_code=$? 
&& echo "yum error: retry $iter in 10s" && sleep 10; \ +RUN for iter in {1..10}; do \ + apt-get update -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl libcap2-bin xz-utils && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ done; \ (exit $exit_code) {{- end }} {{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN for iter in {1..10}; do \ - yum -y install atk gtk gdk xrandr pango libXcomposite libXcursor libXdamage \ - libXext libXi libXtst libXScrnSaver libXrandr GConf2 \ - alsa-lib atk gtk3 ipa-gothic-fonts xorg-x11-fonts-100dpi xorg-x11-fonts-75dpi xorg-x11-utils \ - xorg-x11-fonts-cyrillic xorg-x11-fonts-Type1 xorg-x11-fonts-misc \ - yum clean all && \ - exit_code=0 && break || exit_code=$? && echo "yum error: retry $iter in 10s" && sleep 10; \ +RUN apt-get update -y && \ + for iter in {1..10}; do \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ + libglib2.0-0\ + libnss3\ + libnspr4\ + libatk1.0-0\ + libatk-bridge2.0-0\ + libcups2\ + libdrm2\ + libdbus-1-3\ + libxcb1\ + libxkbcommon0\ + libx11-6\ + libxcomposite1\ + libxdamage1\ + libxext6\ + libxfixes3\ + libxrandr2\ + libgbm1\ + libpango-1.0-0\ + libcairo2\ + libasound2\ + libatspi2.0-0\ + libxshmfence1 && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ done; \ (exit $exit_code) ENV NODE_PATH={{ $beatHome }}/.node @@ -137,6 +144,14 @@ RUN mkdir /licenses COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. 
This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. Thus, we use readlink to follow the link and setcap on the actual binary +RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} +{{- end }} + {{- if ne .user "root" }} RUN groupadd --gid 1000 {{ .BeatName }} RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 91a636f50b0..9309516bd80 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -18,36 +18,52 @@ RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/logs && \ {{- end }} chmod 0775 {{ $beatHome }}/data {{ $beatHome }}/logs -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. 
Thus, we use readlink to follow the link and setcap on the actual binary -RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} -{{- end }} - FROM {{ .from }} {{- if contains .from "ubi-minimal" }} RUN microdnf -y update && \ - microdnf install shadow-utils && \ + microdnf install findutils shadow-utils && \ microdnf clean all {{- else }} -# FIXME: Package bind-license failed to update in arm -RUN case $(arch) in aarch64) YUM_FLAGS="-x bind-license";; esac; \ - yum -y update $YUM_FLAGS \ - {{- if (eq .BeatName "heartbeat") }} - && yum -y install epel-release \ - && yum -y install atk gtk gdk xrandr pango libXcomposite libXcursor libXdamage \ - libXext libXi libXtst libXScrnSaver libXrandr GConf2 \ - alsa-lib atk gtk3 ipa-gothic-fonts xorg-x11-fonts-100dpi xorg-x11-fonts-75dpi xorg-x11-utils \ - xorg-x11-fonts-cyrillic xorg-x11-fonts-Type1 xorg-x11-fonts-misc \ - {{- end }} - && yum clean all && rm -rf /var/cache/yum - # See https://access.redhat.com/discussions/3195102 for why rm is needed +RUN for iter in {1..10}; do \ + apt-get update -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl libcap2-bin xz-utils && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) {{- end }} {{- if (and (eq .BeatName "heartbeat") (not (contains .from "ubi-minimal"))) }} +RUN apt-get update -y && \ + for iter in {1..10}; do \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ + libglib2.0-0\ + libnss3\ + libnspr4\ + libatk1.0-0\ + libatk-bridge2.0-0\ + libcups2\ + libdrm2\ + libdbus-1-3\ + libxcb1\ + libxkbcommon0\ + libx11-6\ + libxcomposite1\ + libxdamage1\ + libxext6\ + libxfixes3\ + libxrandr2\ + libgbm1\ + libpango-1.0-0\ + libcairo2\ + libasound2\ + libatspi2.0-0\ + libxshmfence1 && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) ENV NODE_PATH={{ $beatHome }}/.node RUN echo \ $NODE_PATH \ @@ -93,6 +109,7 @@ RUN set -e ; \ TINI_BIN=""; \ TINI_SHA256=""; \ TINI_VERSION="v0.19.0"; \ + echo "The arch value is $(arch)"; \ case "$(arch)" in \ x86_64) \ TINI_BIN="tini-amd64"; \ @@ -120,6 +137,14 @@ RUN mkdir /licenses COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. Thus, we use readlink to follow the link and setcap on the actual binary +RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} +{{- end }} + {{- if ne .user "root" }} RUN groupadd --gid 1000 {{ .BeatName }} RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} diff --git a/filebeat/Dockerfile b/filebeat/Dockerfile index 763572e5a99..3b17f95e998 100644 --- a/filebeat/Dockerfile +++ b/filebeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ libsystemd-dev \ netcat \ rsync \ diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index 355510ff31f..f4a1faae369 100644 --- a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ netcat \ python3 \ python3-pip \ diff --git a/libbeat/Dockerfile b/libbeat/Dockerfile index 3ff2e7a8ce4..97a1b634384 100644 --- a/libbeat/Dockerfile +++ b/libbeat/Dockerfile @@ 
-2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ netcat \ libpcap-dev \ python3 \ diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index a264c3cfa6c..31f93ed4ef9 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,8 +1,8 @@ FROM golang:1.17.5 RUN \ - apt-get update \ - && apt-get install -y --no-install-recommends \ + apt update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --no-install-recommends \ netcat \ python3 \ python3-dev \ diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index 6223e3d70f0..0a5a4a84128 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ python3 \ python3-pip \ python3-venv \ diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index f07760a8487..5a75f945955 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ netcat \ rsync \ python3 \ diff --git a/x-pack/libbeat/Dockerfile b/x-pack/libbeat/Dockerfile index 12ce0e09203..78c387b04d9 100644 --- a/x-pack/libbeat/Dockerfile +++ b/x-pack/libbeat/Dockerfile @@ -2,7 +2,7 @@ FROM golang:1.17.5 RUN \ apt-get update \ - && apt-get install -y --no-install-recommends \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ netcat \ rsync \ python3 \ From b4314adb38e94075d61d6e579051f16a895cd229 Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Tue, 18 Jan 2022 19:46:14 +0100 Subject: [PATCH 17/30] Merge pull request from GHSA-rj4h-hqvq-cc6q * libbeat: fix IP 
and hostname validation on TLS certificates * handle the case when TLSConfig is nil * fix spelling --- .../common/transport/tlscommon/tls_config.go | 38 +- .../transport/tlscommon/tls_config_test.go | 360 ++++++++++++++++++ 2 files changed, 395 insertions(+), 3 deletions(-) diff --git a/libbeat/common/transport/tlscommon/tls_config.go b/libbeat/common/transport/tlscommon/tls_config.go index 77c60f951f8..be1121fe38b 100644 --- a/libbeat/common/transport/tlscommon/tls_config.go +++ b/libbeat/common/transport/tlscommon/tls_config.go @@ -74,6 +74,9 @@ type TLSConfig struct { // the server certificate. CASha256 []string + // ServerName is the remote server we're connecting to. It can be a hostname or IP address. + ServerName string + // time returns the current time as the number of seconds since the epoch. // If time is nil, TLS uses time.Now. time func() time.Time @@ -123,12 +126,18 @@ func (c *TLSConfig) BuildModuleClientConfig(host string) *tls.Config { InsecureSkipVerify: true, VerifyConnection: makeVerifyConnection(&TLSConfig{ Verification: VerifyFull, + ServerName: host, }), } } config := c.ToConfig() + // config.ServerName does not verify IP addresses config.ServerName = host + + // Keep a copy of the host (whether an IP or hostname) + // for later validation. 
It is used by makeVerifyConnection + c.ServerName = host return config } @@ -141,6 +150,7 @@ func (c *TLSConfig) BuildServerConfig(host string) *tls.Config { InsecureSkipVerify: true, VerifyConnection: makeVerifyServerConnection(&TLSConfig{ Verification: VerifyFull, + ServerName: host, }), } } @@ -154,6 +164,9 @@ func (c *TLSConfig) BuildServerConfig(host string) *tls.Config { func makeVerifyConnection(cfg *TLSConfig) func(tls.ConnectionState) error { switch cfg.Verification { case VerifyFull: + // Cert is trusted by CA + // Hostname or IP matches the certificate + // tls.Config.InsecureSkipVerify is set to true return func(cs tls.ConnectionState) error { // On the client side, PeerCertificates can't be empty. if len(cs.PeerCertificates) == 0 { @@ -164,13 +177,15 @@ func makeVerifyConnection(cfg *TLSConfig) func(tls.ConnectionState) error { Roots: cfg.RootCAs, Intermediates: x509.NewCertPool(), } - err := verifyCertsWithOpts(cs.PeerCertificates, cfg.CASha256, opts) - if err != nil { + if err := verifyCertsWithOpts(cs.PeerCertificates, cfg.CASha256, opts); err != nil { return err } - return verifyHostname(cs.PeerCertificates[0], cs.ServerName) + return verifyHostname(cs.PeerCertificates[0], cfg.ServerName) } case VerifyCertificate: + // Cert is trusted by CA + // Does NOT validate hostname or IP addresses + // tls.Config.InsecureSkipVerify is set to true return func(cs tls.ConnectionState) error { // On the client side, PeerCertificates can't be empty. 
if len(cs.PeerCertificates) == 0 { @@ -184,6 +199,12 @@ func makeVerifyConnection(cfg *TLSConfig) func(tls.ConnectionState) error { return verifyCertsWithOpts(cs.PeerCertificates, cfg.CASha256, opts) } case VerifyStrict: + // Cert is trusted by CA + // Hostname or IP matches the certificate + // Returns error if SNA is empty + // The whole validation is done by Go's standard library default + // SSL/TLS verification (tls.Config.InsecureSkipVerify is set to false) + // so we only need to check the pin if len(cfg.CASha256) > 0 { return func(cs tls.ConnectionState) error { return verifyCAPin(cfg.CASha256, cs.VerifiedChains) @@ -262,6 +283,9 @@ func verifyCertsWithOpts(certs []*x509.Certificate, casha256 []string, opts x509 return nil } +// verifyHostname verifies if the provided hostname matches +// cert.DNSNames, cert.IPAddresses (SNA) +// For hostnames, if SNA is empty, validate the hostname against cert.Subject.CommonName func verifyHostname(cert *x509.Certificate, hostname string) error { if hostname == "" { return nil @@ -278,6 +302,14 @@ func verifyHostname(cert *x509.Certificate, hostname string) error { return nil } } + + parsedCNIP := net.ParseIP(cert.Subject.CommonName) + if parsedCNIP != nil { + if parsedIP.Equal(parsedCNIP) { + return nil + } + } + return x509.HostnameError{Certificate: cert, Host: hostname} } diff --git a/libbeat/common/transport/tlscommon/tls_config_test.go b/libbeat/common/transport/tlscommon/tls_config_test.go index 76dfa61497f..06987959d36 100644 --- a/libbeat/common/transport/tlscommon/tls_config_test.go +++ b/libbeat/common/transport/tlscommon/tls_config_test.go @@ -18,12 +18,22 @@ package tlscommon import ( + "bytes" + "crypto/rand" + "crypto/rsa" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" "encoding/pem" "io/ioutil" + "log" + "math/big" + "net" + "net/http" + "net/url" "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -210,3 +220,353 @@ func openTestCerts() (map[string]*x509.Certificate, error) { 
return certs, nil } + +func TestVerificationMode(t *testing.T) { + testcases := map[string]struct { + verificationMode TLSVerificationMode + serverName string + certHostname string + expectingError bool + ignoreCerts bool + emptySNA bool + legacyCN bool + }{ + "VerifyFull validates domain": { + verificationMode: VerifyFull, + serverName: "localhost", + certHostname: "localhost", + }, + "VerifyFull validates IPv4": { + verificationMode: VerifyFull, + serverName: "127.0.0.1", + certHostname: "127.0.0.1", + }, + "VerifyFull validates IPv6": { + verificationMode: VerifyFull, + serverName: "::1", + certHostname: "::1", + }, + "VerifyFull domain mismatch returns error": { + verificationMode: VerifyFull, + serverName: "localhost", + certHostname: "elastic.co", + expectingError: true, + }, + "VerifyFull IPv4 mismatch returns error": { + verificationMode: VerifyFull, + serverName: "127.0.0.1", + certHostname: "1.2.3.4", + expectingError: true, + }, + "VerifyFull IPv6 mismatch returns error": { + verificationMode: VerifyFull, + serverName: "::1", + certHostname: "faca:b0de:baba::ca", + expectingError: true, + }, + "VerifyFull does not return error when SNA is empty and legacy Common Name is used": { + verificationMode: VerifyFull, + serverName: "localhost", + certHostname: "localhost", + emptySNA: true, + legacyCN: true, + expectingError: false, + }, + "VerifyFull does not return error when SNA is empty and legacy Common Name is used with IP address": { + verificationMode: VerifyFull, + serverName: "127.0.0.1", + certHostname: "127.0.0.1", + emptySNA: true, + legacyCN: true, + expectingError: false, + }, + + "VerifyStrict": { + verificationMode: VerifyStrict, + serverName: "localhost", + certHostname: "localhost", + }, + "VerifyStrict validates domain": { + verificationMode: VerifyStrict, + serverName: "localhost", + certHostname: "localhost", + }, + "VerifyStrict validates IPv4": { + verificationMode: VerifyStrict, + serverName: "127.0.0.1", + certHostname: "127.0.0.1", + 
}, + "VerifyStrict validates IPv6": { + verificationMode: VerifyStrict, + serverName: "::1", + certHostname: "::1", + }, + "VerifyStrict domain mismatch returns error": { + verificationMode: VerifyStrict, + serverName: "127.0.0.1", + certHostname: "elastic.co", + expectingError: true, + }, + "VerifyStrict IPv4 mismatch returns error": { + verificationMode: VerifyStrict, + serverName: "127.0.0.1", + certHostname: "1.2.3.4", + expectingError: true, + }, + "VerifyStrict IPv6 mismatch returns error": { + verificationMode: VerifyStrict, + serverName: "::1", + certHostname: "faca:b0de:baba::ca", + expectingError: true, + }, + "VerifyStrict return error when SNA is empty and legacy Common Name is used": { + verificationMode: VerifyStrict, + serverName: "localhost", + certHostname: "localhost", + emptySNA: true, + legacyCN: true, + expectingError: true, + }, + "VerifyStrict return error when SNA is empty and legacy Common Name is used with IP address": { + verificationMode: VerifyStrict, + serverName: "127.0.0.1", + certHostname: "127.0.0.1", + emptySNA: true, + legacyCN: true, + expectingError: true, + }, + "VerifyStrict returns error when SNA is empty": { + verificationMode: VerifyStrict, + serverName: "localhost", + certHostname: "localhost", + emptySNA: true, + expectingError: true, + }, + + "VerifyCertificate does not validate domain": { + verificationMode: VerifyCertificate, + serverName: "localhost", + certHostname: "elastic.co", + }, + "VerifyCertificate does not validate IPv4": { + verificationMode: VerifyCertificate, + serverName: "127.0.0.1", + certHostname: "elastic.co", + }, + "VerifyCertificate does not validate IPv6": { + verificationMode: VerifyCertificate, + serverName: "127.0.0.1", + certHostname: "faca:b0de:baba::ca", + }, + + "VerifyNone accepts untrusted certificates": { + verificationMode: VerifyNone, + serverName: "127.0.0.1", + certHostname: "faca:b0de:baba::ca", + ignoreCerts: true, + }, + } + + for name, test := range testcases { + t.Run(name, 
func(t *testing.T) { + serverURL, caCert := startTestServer(t, test.certHostname, test.emptySNA, test.legacyCN) + certPool := x509.NewCertPool() + certPool.AddCert(caCert) + + tlsC := TLSConfig{ + Verification: test.verificationMode, + RootCAs: certPool, + ServerName: test.serverName, + } + + if test.ignoreCerts { + tlsC.RootCAs = nil + tlsC.ServerName = "" + } + + client := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsC.BuildModuleClientConfig(test.serverName), + }, + } + + resp, err := client.Get(serverURL.String()) + if test.expectingError { + if err != nil { + // We got the expected error, no need to check the status code + return + } + } + + if err != nil { + t.Fatalf("did not expect an error: %v", err) + } + + if resp.StatusCode != 200 { + t.Fatalf("expecting 200 got: %d", resp.StatusCode) + } + }) + } +} + +// startTestServer starts a HTTP server for testing and returns it's certificates. +// If `address` is a hostname it will be added to the leaf certificate CN. +// Regardless of being a hostname or IP, `address` will be added to the correct +// SNA. +// +// New certificates are generated for each HTTP server, they use RSA 1024 bits, it +// is not the safest, but it's enough for tests. +// The HTTP server will shutdown at the end of the test. 
+func startTestServer(t *testing.T, address string, emptySNA, legacyCN bool) (url.URL, *x509.Certificate) { + // Creates a listener on a random port selected by the OS + l, err := net.Listen("tcp", "localhost:0") + t.Cleanup(func() { l.Close() }) + + // l.Addr().String() will return something like: 127.0.0.1:12345, + // add the protocol and parse the URL + serverURL, err := url.Parse("https://" + l.Addr().String()) + if err != nil { + t.Fatal(err) + } + + // Generate server ceritficates for the given address + // and start the server + caCert, serverCert := genVerifyCerts(t, address, emptySNA, legacyCN) + server := http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("SSL test server")) + }), + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + }, + } + t.Cleanup(func() { server.Close() }) + go server.ServeTLS(l, "", "") + + return *serverURL, caCert +} + +func genVerifyCerts(t *testing.T, hostnameOrIP string, emptySNA, legacyCN bool) (*x509.Certificate, tls.Certificate) { + t.Helper() + + hostname := "" + ipAddress := net.ParseIP(hostnameOrIP) + subjectCommonName := "You Know, for Search" + + if legacyCN { + // Legacy behaviour of using the Common Name field to hold + // a hostname or IP address + subjectCommonName = hostnameOrIP + } + + // We set either hostname or ipAddress + if ipAddress == nil { + hostname = hostnameOrIP + } + + // ========================== Root CA Cert + ca := &x509.Certificate{ + SerialNumber: big.NewInt(42), + Subject: pkix.Name{ + Organization: []string{"Root CA Corp"}, + Country: []string{"DE"}, + Province: []string{""}, + Locality: []string{"Berlin"}, + StreetAddress: []string{"PostdamerPlatz"}, + PostalCode: []string{"42"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(100, 0, 0), // 100 years validity + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | 
x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + // ========================== Generate RootCA private Key + caPrivKey, err := rsa.GenerateKey(rand.Reader, 1024) + if err != nil { + log.Panicf("generating RSA key for CA cert: %v", err) + } + + // ========================== Generate RootCA Cert + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + log.Panicf("generating CA certificate: %v", err) + } + + caPEM := new(bytes.Buffer) + pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + }) + + // ========================== Generate Server Certificate (leaf) + cert := &x509.Certificate{ + SerialNumber: big.NewInt(100), + Subject: pkix.Name{ + Organization: []string{"My Server Application Corp"}, + Country: []string{"DE"}, + Province: []string{""}, + Locality: []string{"Berlin"}, + StreetAddress: []string{"AlexanderPlatz"}, + PostalCode: []string{"100"}, + CommonName: subjectCommonName, + }, + + // SNA - Subject Alternate Name we don't populate + EmailAddresses: nil, + URIs: nil, + + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), + SubjectKeyId: []byte{1, 2, 3, 4, 6}, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature, + } + + // Set SNA based on what we got + if !emptySNA { + if hostname != "" { + cert.DNSNames = []string{hostnameOrIP} + } + if ipAddress != nil { + cert.IPAddresses = []net.IP{ipAddress} + } + } + + certPrivKey, err := rsa.GenerateKey(rand.Reader, 1024) + if err != nil { + log.Panicf("generating certificate private key: %v", err) + } + + // =========================== Use CA to sign/generate the server (leaf) certificate + certBytes, err := x509.CreateCertificate(rand.Reader, cert, ca, &certPrivKey.PublicKey, caPrivKey) + if err != nil { + log.Panicf("generating certificate: %v", err) + } + + rootCACert, err := x509.ParseCertificate(caBytes) + if err != nil { + 
t.Fatalf("could not parse rootBytes into a certificate: %v", err) + } + + certPEM := new(bytes.Buffer) + pem.Encode(certPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + + certPrivKeyPEM := new(bytes.Buffer) + pem.Encode(certPrivKeyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey), + }) + + serverCert, err := tls.X509KeyPair(certPEM.Bytes(), certPrivKeyPEM.Bytes()) + if err != nil { + t.Fatalf("could not convert server certificate to tls.Certificate: %v", err) + } + + return rootCACert, serverCert +} From 4921d9e917dccd426c079799bb7708bec40ad6c0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 20:42:06 -0600 Subject: [PATCH 18/30] [7.17][Heartbeat] Defer monitor / ICMP errors to monitor runtime / ES (backport #29413) (#29896) * [Heartbeat] Defer monitor / ICMP errors to monitor runtime / ES (#29413) This PR generally improves the error behavior of all monitors, and some specific ICMP related errors as well. These two items are combined in one PR because the general theme here is improving the ICMP error experience, and improving ICMP required improving all monitors. Fixes #29346 and incremental progress toward #29692 General monitor improvements Generally speaking, per #29692 we are trying to send monitor output to ES wherever possible. With this PR we now send any monitor initialization errors (such as a lack of ICMP kernel capabilities) during monitor creation to ES. We do this by allowing the monitor to initialize and run on schedule, even though we know it will always send the same error message. This lets users more easily debug issues in Kibana. ICMP Specific Improvement This PR also Removes broken a IP capability check that caused heartbeat to be unable to start. We now just rely on return codes from attempts to actually send packets. This is the more specific fix for #29346 . 
I was not able to exactly reproduce the exact customer reported issue, where the user somehow disabled ipv6 in a way that the ICMP loop that I can't exactly reproduce. I tried disabling ipv6 fully with sudo sysctl net.ipv6.conf.all.disable_ipv6=1 but that didn't yield the error in #29346 The logic is now simplified, there's no truly reliable way to know if you can send an ipv6 (or ipv4) ping before you send it (settings can change at any time! network cards can disappear!), so we just let the error codes happen as the check is executed. This is also generally a better UX in that the errors will now be visible in the Uptime app, not just the logs. It should be noted that the ipv4 and ipv6 boolean options only are documented to affect how DNS lookups happen. With this change the behavior matches the docs. Note that ICMP is a bit weird in that there's a single ICMP loop in heartbeat, and all monitors are really just interacting with that. Removal of .synthetics This also ignores the .synthetics folder which has been inconvenient for some time for devs, in that it dirties the git path (cherry picked from commit 616db13b36da5e4db327b745b333abf54a07345e) * [Heartbeat] Fix broken macOS ICMP test (#29900) Fixes broken macos python e2e test Co-authored-by: Andrew Cholakian Co-authored-by: Justin Kambic --- CHANGELOG.next.asciidoc | 4 ++ heartbeat/monitors/active/icmp/icmp.go | 4 -- heartbeat/monitors/active/icmp/loop.go | 1 - heartbeat/monitors/active/icmp/stdloop.go | 34 ++++--------- heartbeat/monitors/factory_test.go | 7 +-- heartbeat/monitors/mocks_test.go | 61 +++++++++++++++-------- heartbeat/monitors/monitor.go | 21 ++++++-- heartbeat/monitors/monitor_test.go | 38 ++++++++++++-- heartbeat/monitors/stdfields/stdfields.go | 5 +- heartbeat/monitors/task.go | 2 +- heartbeat/tests/system/test_icmp.py | 22 +++----- x-pack/heartbeat/.gitignore | 1 + 12 files changed, 121 insertions(+), 79 deletions(-) create mode 100644 x-pack/heartbeat/.gitignore diff --git 
a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 5f35daf8fd7..e66ae58f2c1 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -19,6 +19,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* +- Fix broken macOS ICMP python e2e test. {pull}29900[29900] - Only add monitor.status to browser events when summary. {pull}29460[29460] - Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606] @@ -51,6 +52,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* - Fix race condition in http monitors using `mode:all` that can cause crashes. {pull}29697[pull] +- Fix broken ICMP availability check that prevented heartbeat from starting in rare cases. {pull}29413[pull] *Metricbeat* @@ -83,6 +85,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Heartbeat* +- More errors are now visible in ES with new logic failing monitors later to ease debugging. 
{pull}29413[pull] + *Metricbeat* diff --git a/heartbeat/monitors/active/icmp/icmp.go b/heartbeat/monitors/active/icmp/icmp.go index ef57cdbebae..073660c259a 100644 --- a/heartbeat/monitors/active/icmp/icmp.go +++ b/heartbeat/monitors/active/icmp/icmp.go @@ -91,10 +91,6 @@ func (jf *jobFactory) checkConfig() error { } func (jf *jobFactory) makePlugin() (plugin2 plugin.Plugin, err error) { - if err := jf.loop.checkNetworkMode(jf.ipVersion); err != nil { - return plugin.Plugin{}, err - } - pingFactory := jf.pingIPFactory(&jf.config) var j []jobs.Job diff --git a/heartbeat/monitors/active/icmp/loop.go b/heartbeat/monitors/active/icmp/loop.go index de4d0ef4dfc..b29fa247f16 100644 --- a/heartbeat/monitors/active/icmp/loop.go +++ b/heartbeat/monitors/active/icmp/loop.go @@ -23,7 +23,6 @@ import ( ) type ICMPLoop interface { - checkNetworkMode(mode string) error ping( addr *net.IPAddr, timeout time.Duration, diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index 05858f5537f..9f5f5543967 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -20,7 +20,6 @@ package icmp import ( "bytes" "encoding/binary" - "errors" "fmt" "math/rand" "net" @@ -159,29 +158,6 @@ func newICMPLoop() (*stdICMPLoop, error) { return l, nil } -func (l *stdICMPLoop) checkNetworkMode(mode string) error { - ip4, ip6 := false, false - switch mode { - case "ip4": - ip4 = true - case "ip6": - ip6 = true - case "ip": - ip4, ip6 = true, true - default: - return fmt.Errorf("'%v' is not supported", mode) - } - - if ip4 && l.conn4 == nil { - return errors.New("failed to initiate IPv4 support. Check log details for permission configuration") - } - if ip6 && l.conn6 == nil { - return errors.New("failed to initiate IPv6 support. 
Check log details for permission configuration") - } - - return nil -} - func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) { for { bytes := make([]byte, 512) @@ -251,6 +227,14 @@ func (l *stdICMPLoop) ping( timeout time.Duration, interval time.Duration, ) (time.Duration, int, error) { + isIPv6 := addr.IP.To4() == nil + if isIPv6 && l.conn6 == nil { + return -1, -1, fmt.Errorf("cannot ping IPv6 address '%s', no IPv6 connection available", addr) + } + if !isIPv6 && l.conn4 == nil { + return -1, -1, fmt.Errorf("cannot ping IPv4 address '%s', no IPv4 connection available", addr) + } + var err error toTimer := time.NewTimer(timeout) defer toTimer.Stop() @@ -379,7 +363,7 @@ func (l *stdICMPLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) _, err := conn.WriteTo(encoded, addr) if err != nil { - return nil, err + return nil, fmt.Errorf("could not write to conn: %w", err) } ctx.ts = ts diff --git a/heartbeat/monitors/factory_test.go b/heartbeat/monitors/factory_test.go index c395050aaa1..e4ff3589ccd 100644 --- a/heartbeat/monitors/factory_test.go +++ b/heartbeat/monitors/factory_test.go @@ -149,7 +149,7 @@ func TestPreProcessors(t *testing.T) { } func TestDuplicateMonitorIDs(t *testing.T) { - serverMonConf := mockPluginConf(t, "custom", "@every 1ms", "http://example.net") + serverMonConf := mockPluginConf(t, "custom", "custom", "@every 1ms", "http://example.net") badConf := mockBadPluginConf(t, "custom", "@every 1ms") reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} @@ -190,8 +190,9 @@ func TestDuplicateMonitorIDs(t *testing.T) { m1.Stop() m2.Stop() - // 3 are counted as built, even the bad config - require.Equal(t, 3, built.Load()) + // Two are counted as built. 
The bad config is missing a stdfield so it + // doesn't complete construction + require.Equal(t, 2, built.Load()) // Only 2 closes, because the bad config isn't closed require.Equal(t, 2, closed.Load()) } diff --git a/heartbeat/monitors/mocks_test.go b/heartbeat/monitors/mocks_test.go index 6d51791e3d7..720088db211 100644 --- a/heartbeat/monitors/mocks_test.go +++ b/heartbeat/monitors/mocks_test.go @@ -99,24 +99,28 @@ func (pc *MockPipelineConnector) ConnectWith(beat.ClientConfig) (beat.Client, er return c, nil } -func mockEventMonitorValidator(id string) validator.Validator { +func baseMockEventMonitorValidator(id string, name string, status string) validator.Validator { var idMatcher isdef.IsDef if id == "" { idMatcher = isdef.IsStringMatching(regexp.MustCompile(`^auto-test-.*`)) } else { idMatcher = isdef.IsEqual(id) } + return lookslike.MustCompile(map[string]interface{}{ + "monitor": map[string]interface{}{ + "id": idMatcher, + "name": name, + "type": "test", + "duration.us": isdef.IsDuration, + "status": status, + "check_group": isdef.IsString, + }, + }) +} + +func mockEventMonitorValidator(id string, name string) validator.Validator { return lookslike.Strict(lookslike.Compose( - lookslike.MustCompile(map[string]interface{}{ - "monitor": map[string]interface{}{ - "id": idMatcher, - "name": "", - "type": "test", - "duration.us": isdef.IsDuration, - "status": "up", - "check_group": isdef.IsString, - }, - }), + baseMockEventMonitorValidator(id, name, "up"), hbtestllext.MonitorTimespanValidator, hbtest.SummaryChecks(1, 0), lookslike.MustCompile(mockEventCustomFields()), @@ -151,15 +155,19 @@ func mockPluginBuilder() (plugin.PluginFactory, *atomic.Int, *atomic.Int) { unpacked := struct { URLs []string `config:"urls" validate:"required"` }{} - err := config.Unpack(&unpacked) - if err != nil { - return plugin.Plugin{}, err - } - j, err := createMockJob() + + // track all closes, even on error closer := func() error { closed.Inc() return nil } + + err := 
config.Unpack(&unpacked) + if err != nil { + return plugin.Plugin{DoClose: closer}, err + } + j, err := createMockJob() + return plugin.Plugin{Jobs: j, DoClose: closer, Endpoints: 1}, err }, Stats: plugin.NewPluginCountersRecorder("test", reg)}, @@ -174,13 +182,15 @@ func mockPluginsReg() (p *plugin.PluginsReg, built *atomic.Int, closed *atomic.I return reg, built, closed } -func mockPluginConf(t *testing.T, id string, schedule string, url string) *common.Config { +func mockPluginConf(t *testing.T, id string, name string, schedule string, url string) *common.Config { confMap := map[string]interface{}{ "type": "test", "urls": []string{url}, "schedule": schedule, + "name": name, } + // Optional to let us simulate this key missing if id != "" { confMap["id"] = id } @@ -197,7 +207,6 @@ func mockBadPluginConf(t *testing.T, id string, schedule string) *common.Config confMap := map[string]interface{}{ "type": "test", "notanoption": []string{"foo"}, - "schedule": schedule, } if id != "" { @@ -210,8 +219,6 @@ func mockBadPluginConf(t *testing.T, id string, schedule string) *common.Config return conf } -// mockInvalidPlugin conf returns a config that invalid at the basic level of -// what's expected in heartbeat, i.e. no type. 
func mockInvalidPluginConf(t *testing.T) *common.Config { confMap := map[string]interface{}{ "hoeutnheou": "oueanthoue", @@ -222,3 +229,17 @@ func mockInvalidPluginConf(t *testing.T) *common.Config { return conf } + +func mockInvalidPluginConfWithStdFields(t *testing.T, id string, name string, schedule string) *common.Config { + confMap := map[string]interface{}{ + "type": "test", + "id": id, + "name": name, + "schedule": schedule, + } + + conf, err := common.NewConfigFrom(confMap) + require.NoError(t, err) + + return conf +} diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index 669579e31aa..91a6a881d84 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ -163,13 +163,26 @@ func newMonitorUnsafe( return p.Close() } - wrappedJobs := wrappers.WrapCommon(p.Jobs, m.stdFields) - m.endpoints = p.Endpoints - + // If we've hit an error at this point, still run on schedule, but always return an error. + // This way the error is clearly communicated through to kibana. + // Since the error is not recoverable in these instances, the user will need to reconfigure + // the monitor, which will destroy and recreate it in heartbeat, thus clearing this error. + // + // Note: we do this at this point, and no earlier, because at a minimum we need the + // standard monitor fields (id, name and schedule) to deliver an error to kibana in a way + // that it can render. 
if err != nil { - return m, fmt.Errorf("job err %v", err) + // Note, needed to hoist err to this scope, not just to add a prefix + fullErr := fmt.Errorf("job could not be initialized: %s", err) + // A placeholder job that always returns an error + p.Jobs = []jobs.Job{func(event *beat.Event) ([]jobs.Job, error) { + return nil, fullErr + }} } + wrappedJobs := wrappers.WrapCommon(p.Jobs, m.stdFields) + m.endpoints = p.Endpoints + m.configuredJobs, err = m.makeTasks(config, wrappedJobs) if err != nil { return m, err diff --git a/heartbeat/monitors/monitor_test.go b/heartbeat/monitors/monitor_test.go index bbcd5b9b74c..8184a867eae 100644 --- a/heartbeat/monitors/monitor_test.go +++ b/heartbeat/monitors/monitor_test.go @@ -25,19 +25,49 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/heartbeat/scheduler" + "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/isdef" "github.com/elastic/go-lookslike/testslike" + "github.com/elastic/go-lookslike/validator" ) -func TestMonitor(t *testing.T) { - serverMonConf := mockPluginConf(t, "", "@every 1ms", "http://example.net") +// TestMonitorBasic tests a basic config +func TestMonitorBasic(t *testing.T) { + testMonitorConfig( + t, + mockPluginConf(t, "myId", "myName", "@every 1ms", "http://example.net"), + mockEventMonitorValidator("myId", "myName"), + ) +} + +// TestMonitorBasic tests a config that errors out at plugin creation, but still has stdfields defined. 
+// This should cause the monitor to run, but only produce error documents +func TestMonitorCfgError(t *testing.T) { + testMonitorConfig( + t, + mockInvalidPluginConfWithStdFields(t, "invalidTestId", "invalidTestName", "@every 10s"), + lookslike.Compose( + baseMockEventMonitorValidator("invalidTestId", "invalidTestName", "down"), + lookslike.MustCompile(common.MapStr{ + "error": common.MapStr{ + "message": isdef.IsStringContaining("missing required field"), + "type": "io", + }, + }), + ), + ) +} + +func testMonitorConfig(t *testing.T, conf *common.Config, eventValidator validator.Validator) { reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - mon, err := newMonitor(serverMonConf, reg, pipelineConnector, sched.Add, nil, false) + mon, err := newMonitor(conf, reg, pipelineConnector, sched.Add, nil, false) require.NoError(t, err) mon.Start() @@ -56,7 +86,7 @@ func TestMonitor(t *testing.T) { pcClient.Close() for _, event := range pcClient.Publishes() { - testslike.Test(t, mockEventMonitorValidator(""), event.Fields) + testslike.Test(t, eventValidator, event.Fields) } } else { // Let's yield this goroutine so we don't spin diff --git a/heartbeat/monitors/stdfields/stdfields.go b/heartbeat/monitors/stdfields/stdfields.go index f09161c2adf..92e5bc4bb90 100644 --- a/heartbeat/monitors/stdfields/stdfields.go +++ b/heartbeat/monitors/stdfields/stdfields.go @@ -18,10 +18,9 @@ package stdfields import ( + "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/heartbeat/scheduler/schedule" "github.com/elastic/beats/v7/libbeat/common" ) @@ -46,7 +45,7 @@ func ConfigToStdMonitorFields(config *common.Config) (StdMonitorFields, error) { mpi := StdMonitorFields{Enabled: true} if err := config.Unpack(&mpi); err != nil { - return mpi, errors.Wrap(err, "error unpacking monitor plugin config") + return mpi, fmt.Errorf("error unpacking 
monitor plugin config: %w", err) } // Use `service_name` if `service.name` is unspecified diff --git a/heartbeat/monitors/task.go b/heartbeat/monitors/task.go index 11b013a9871..a7f1848b3ae 100644 --- a/heartbeat/monitors/task.go +++ b/heartbeat/monitors/task.go @@ -108,7 +108,7 @@ func runPublishJob(job jobs.Job, client *WrappedClient) []scheduler.TaskFunc { conts, err := job(event) if err != nil { - logp.Err("Job %v failed with: ", err) + logp.Err("Job failed with: %s", err) } hasContinuations := len(conts) > 0 diff --git a/heartbeat/tests/system/test_icmp.py b/heartbeat/tests/system/test_icmp.py index 7f61a7430f8..f7be72e89f4 100644 --- a/heartbeat/tests/system/test_icmp.py +++ b/heartbeat/tests/system/test_icmp.py @@ -6,6 +6,7 @@ import sys import time import unittest +import re from beat.beat import INTEGRATION_TESTS from elasticsearch import Elasticsearch from heartbeat import BaseTest @@ -35,17 +36,10 @@ def test_base(self): proc = self.start_beat() - def has_started_message(): return self.log_contains("ICMP loop successfully initialized") - - def has_failed_message(): return self.log_contains("Failed to initialize ICMP loop") - - # We don't know if the system tests are running is configured to support or not support ping, but we can at least check that the ICMP loop - # was initiated. In the future we should start up VMs with the correct perms configured and be more specific. In addition to that - # we should run pings on those machines and make sure they work. 
- self.wait_until(lambda: has_started_message() or has_failed_message(), 30) - - if has_failed_message(): - proc.check_kill_and_wait(1) - else: - # Check that documents are moving through - self.wait_until(lambda: self.output_has(lines=1)) + # because we have no way of knowing if the current environment has the ability to do ICMP pings + # we are instead asserting the monitor's status via the output and checking for errors where appropriate + self.wait_until(lambda: self.output_has(lines=1)) + output = self.read_output() + monitor_status = output[0]["monitor.status"] + assert monitor_status == "up" or monitor_status == "down" + assert output[0]["monitor.type"] == "icmp" diff --git a/x-pack/heartbeat/.gitignore b/x-pack/heartbeat/.gitignore new file mode 100644 index 00000000000..8af6c73dc61 --- /dev/null +++ b/x-pack/heartbeat/.gitignore @@ -0,0 +1 @@ +.synthetics From 5511d70563d5b7afcf29fb02a5c043fa71ebd884 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 19 Jan 2022 06:56:20 -0500 Subject: [PATCH 19/30] [Automation] Update elastic stack version to 7.17.0-68da5d12 for testing (#29904) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index e775e69d34e..b7b588dce40 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: 
docker.elastic.co/logstash/logstash-oss:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 15d2db2a94e..e4b926ab8bf 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-079761a0-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.17.0-68da5d12-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 8c48fc5bc5ef25c83d817ed65f62ba0e91f314a9 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 20 Jan 2022 02:13:32 -0500 Subject: [PATCH 20/30] 
[Automation] Update elastic stack version to 7.17.0-e1efbe3a for testing (#29922) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index b7b588dce40..0e476ca5829 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: docker.elastic.co/logstash/logstash-oss:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index e4b926ab8bf..39d8152b061 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | 
grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-68da5d12-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.17.0-e1efbe3a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 689602b61478e759640ae60c383c499e80a3f6a0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 20 Jan 2022 09:39:32 +0100 Subject: [PATCH 21/30] [7.17](backport #29605) Fix annotation enrichment (#29834) * Fix annotation enrichment (#29605) * add annotation enrichment Signed-off-by: Tetiana Kravchenko * add pr number Signed-off-by: Tetiana Kravchenko * fix tests Signed-off-by: Tetiana Kravchenko * update changelog message Signed-off-by: Tetiana Kravchenko * clean up changelog after rebase Signed-off-by: Tetiana Kravchenko (cherry picked from commit 2917392a019306b57523d22435b5af756bcbe770) # Conflicts: # libbeat/common/kubernetes/metadata/pod_test.go * Update CHANGELOG.next.asciidoc * Fix conflict in tests Co-authored-by: Tetiana Kravchenko --- CHANGELOG.next.asciidoc | 2 +- .../common/kubernetes/metadata/metadata.go | 11 ++++++--- .../common/kubernetes/metadata/node_test.go | 23 +++++++++++++++---- libbeat/common/kubernetes/metadata/pod.go | 2 +- .../kubernetes/metadata/service_test.go | 15 ++++++++---- 5 files changed, 40 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e66ae58f2c1..751357c6a1c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -11,7 
+11,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* - Add job.name in pods controlled by Jobs {pull}28954[28954] -- Change Docker base image from CentOS 7 to Ubuntu 20.04 {pull}29681[29681] +- Enrich kubernetes metadata with node annotations. {pull}29605[29605] *Auditbeat* diff --git a/libbeat/common/kubernetes/metadata/metadata.go b/libbeat/common/kubernetes/metadata/metadata.go index 0d95d960640..9b493210255 100644 --- a/libbeat/common/kubernetes/metadata/metadata.go +++ b/libbeat/common/kubernetes/metadata/metadata.go @@ -72,11 +72,16 @@ func WithFields(key string, value interface{}) FieldOptions { } } -// WithLabels FieldOption allows adding labels under sub-resource(kind) +// WithMetadata FieldOption allows adding labels and annotations under sub-resource(kind) // example if kind=namespace namespace.labels key will be added -func WithLabels(kind string) FieldOptions { +func WithMetadata(kind string) FieldOptions { return func(meta common.MapStr) { - safemapstr.Put(meta, strings.ToLower(kind)+".labels", meta["labels"]) + if meta["labels"] != nil { + safemapstr.Put(meta, strings.ToLower(kind)+".labels", meta["labels"]) + } + if meta["annotations"] != nil { + safemapstr.Put(meta, strings.ToLower(kind)+".annotations", meta["annotations"]) + } } } diff --git a/libbeat/common/kubernetes/metadata/node_test.go b/libbeat/common/kubernetes/metadata/node_test.go index b809414db56..8431adc7361 100644 --- a/libbeat/common/kubernetes/metadata/node_test.go +++ b/libbeat/common/kubernetes/metadata/node_test.go @@ -52,7 +52,10 @@ func TestNode_Generate(t *testing.T) { Labels: map[string]string{ "foo": "bar", }, - Annotations: map[string]string{}, + Annotations: map[string]string{ + "key1": "value1", + "key2": "value2", + }, }, TypeMeta: metav1.TypeMeta{ Kind: "Node", @@ -71,11 +74,16 @@ func TestNode_Generate(t *testing.T) { "labels": common.MapStr{ "foo": "bar", }, + "annotations": common.MapStr{ + "key2": "value2", 
+ }, }}, }, } - cfg := common.NewConfig() + cfg, _ := common.NewConfigFrom(Config{ + IncludeAnnotations: []string{"key2"}, + }) metagen := NewNodeMetadataGenerator(cfg, nil, client) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -102,7 +110,9 @@ func TestNode_GenerateFromName(t *testing.T) { Labels: map[string]string{ "foo": "bar", }, - Annotations: map[string]string{}, + Annotations: map[string]string{ + "key": "value", + }, }, TypeMeta: metav1.TypeMeta{ Kind: "Node", @@ -121,12 +131,17 @@ func TestNode_GenerateFromName(t *testing.T) { "labels": common.MapStr{ "foo": "bar", }, + "annotations": common.MapStr{ + "key": "value", + }, }, }, } for _, test := range tests { - cfg := common.NewConfig() + cfg, _ := common.NewConfigFrom(Config{ + IncludeAnnotations: []string{"key"}, + }) nodes := cache.NewStore(cache.MetaNamespaceKeyFunc) nodes.Add(test.input) metagen := NewNodeMetadataGenerator(cfg, nodes, client) diff --git a/libbeat/common/kubernetes/metadata/pod.go b/libbeat/common/kubernetes/metadata/pod.go index 66138f06d9f..b0b8576f9b4 100644 --- a/libbeat/common/kubernetes/metadata/pod.go +++ b/libbeat/common/kubernetes/metadata/pod.go @@ -93,7 +93,7 @@ func (p *pod) GenerateK8s(obj kubernetes.Resource, opts ...FieldOptions) common. 
} if p.node != nil { - meta := p.node.GenerateFromName(po.Spec.NodeName, WithLabels("node")) + meta := p.node.GenerateFromName(po.Spec.NodeName, WithMetadata("node")) if meta != nil { out.Put("node", meta["node"]) } else { diff --git a/libbeat/common/kubernetes/metadata/service_test.go b/libbeat/common/kubernetes/metadata/service_test.go index f279fc04f50..bcd9a313edf 100644 --- a/libbeat/common/kubernetes/metadata/service_test.go +++ b/libbeat/common/kubernetes/metadata/service_test.go @@ -279,7 +279,9 @@ func TestService_GenerateWithNamespace(t *testing.T) { Labels: map[string]string{ "nskey": "nsvalue", }, - Annotations: map[string]string{}, + Annotations: map[string]string{ + "ns.annotation": "value", + }, }, TypeMeta: metav1.TypeMeta{ Kind: "Namespace", @@ -309,21 +311,26 @@ func TestService_GenerateWithNamespace(t *testing.T) { "namespace_labels": common.MapStr{ "nskey": "nsvalue", }, + "namespace_annotations": common.MapStr{ + "ns_annotation": "value", + }, }, }, }, } for _, test := range tests { - cfg := common.NewConfig() + nsConfig, _ := common.NewConfigFrom(map[string]interface{}{ + "include_annotations": []string{"ns.annotation"}, + }) services := cache.NewStore(cache.MetaNamespaceKeyFunc) services.Add(test.input) namespaces := cache.NewStore(cache.MetaNamespaceKeyFunc) namespaces.Add(test.namespace) - nsMeta := NewNamespaceMetadataGenerator(cfg, namespaces, client) + nsMeta := NewNamespaceMetadataGenerator(nsConfig, namespaces, client) - metagen := NewServiceMetadataGenerator(cfg, services, nsMeta, client) + metagen := NewServiceMetadataGenerator(nsConfig, services, nsMeta, client) t.Run(test.name, func(t *testing.T) { assert.Equal(t, test.output, metagen.Generate(test.input)) }) From 4b7f031cbc5297f880ff70d3bbb12487ed0a191e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 20 Jan 2022 15:51:56 +0100 Subject: [PATCH 22/30] [7.17](backport #29913) [Metricbeat] gcp.gke: fix overview dashboard 
(#29914) Co-authored-by: endorama <526307+endorama@users.noreply.github.com> Co-authored-by: Edoardo Tenani --- CHANGELOG.next.asciidoc | 1 + .../1ae960c0-f9f8-11eb-bc38-79936db7c106.json | 516 +++++++++--------- 2 files changed, 259 insertions(+), 258 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 751357c6a1c..c858560755b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -60,6 +60,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix in rename processor to ingest metrics for `write.iops` to proper field instead of `write_iops` in rds metricset. {pull}28960[28960] - Enhance filter check in kubernetes event metricset. {pull}29470[29470] - Fix gcp metrics metricset apply aligner to all metric_types {pull}29514[29513] +- Fixed GCP GKE Overview dashboard {pull}29913[29913] *Packetbeat* diff --git a/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json b/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json index 251cc7aba6a..5e2593a51df 100644 --- a/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json +++ b/x-pack/metricbeat/module/gcp/_meta/kibana/7/dashboard/1ae960c0-f9f8-11eb-bc38-79936db7c106.json @@ -98,7 +98,7 @@ }, "panelIndex": "9d604bbc-ce5e-49c8-b961-d974fa9d7891", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -167,7 +167,7 @@ "panelIndex": "a4a26c8f-3415-4cb2-a44e-27fe2e706862", "title": "Clusters", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -236,7 +236,7 @@ "panelIndex": "6018a29a-f6f0-4dec-9940-9094b3ed841d", "title": "Nodes", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -305,7 +305,7 @@ "panelIndex": "6f1f7601-f921-4051-899d-10fda75d07df", "title": "Namespaces", "type": "lens", - "version": 
"7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -374,7 +374,7 @@ "panelIndex": "1d22e757-a6ec-43df-a60f-decda1d057c2", "title": "Pods", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -451,7 +451,7 @@ "panelIndex": "7541ca7c-3333-4065-9d98-f8fa11c29ebf", "title": "Container restarts", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -487,7 +487,7 @@ }, "panelIndex": "00e7a3a4-e042-4f46-8637-3159cd608047", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -523,7 +523,7 @@ }, "panelIndex": "20e84709-926a-4588-a76c-dd9c5583873a", "type": "visualization", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -661,7 +661,7 @@ "panelIndex": "10a4fa84-84c1-45ac-921a-7d4e7ba0a461", "title": "Pods per cluster", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -802,7 +802,7 @@ "panelIndex": "8261db16-766d-4c27-b988-87c90ed067d0", "title": "Nodes per cluster", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -991,7 +991,156 @@ "panelIndex": "1df880b1-44bc-468b-aba7-0cec27b74b12", "title": "CPU usage by Pod (seconds)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" + }, + { + "embeddableConfig": { + "attributes": { + "references": [ + { + "id": "metricbeat-*", + "name": "indexpattern-datasource-current-indexpattern", + "type": "index-pattern" + }, + { + "id": "metricbeat-*", + "name": "indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "type": "index-pattern" + } + ], + "state": { + "datasourceStates": { + "indexpattern": { + "layers": { + "f4259765-f8eb-47de-8472-a04528f8219e": { + "columnOrder": [ + "18e2e114-77eb-4a85-afdb-ddd837e6f05a", + "3153e211-b16a-4f92-b775-6d06a4edaf44", + "61256570-b7dd-4bec-b73d-d12d993ae091" + ], + "columns": { + "18e2e114-77eb-4a85-afdb-ddd837e6f05a": { + "dataType": 
"string", + "isBucketed": true, + "label": "Top values of gcp.labels.resource.pod_name", + "operationType": "terms", + "params": { + "missingBucket": false, + "orderBy": { + "columnId": "61256570-b7dd-4bec-b73d-d12d993ae091", + "type": "column" + }, + "orderDirection": "desc", + "otherBucket": true, + "size": 100 + }, + "scale": "ordinal", + "sourceField": "gcp.labels.resource.pod_name" + }, + "3153e211-b16a-4f92-b775-6d06a4edaf44": { + "dataType": "date", + "isBucketed": true, + "label": "@timestamp", + "operationType": "date_histogram", + "params": { + "interval": "60s" + }, + "scale": "interval", + "sourceField": "@timestamp" + }, + "61256570-b7dd-4bec-b73d-d12d993ae091": { + "dataType": "number", + "isBucketed": false, + "label": "Median of gcp.gke.container.memory.limit_utilization.pct", + "operationType": "median", + "params": { + "format": { + "id": "percent", + "params": { + "decimals": 0 + } + } + }, + "scale": "ratio", + "sourceField": "gcp.gke.container.memory.limit_utilization.pct" + } + }, + "incompleteColumns": {} + } + } + } + }, + "filters": [], + "query": { + "language": "kuery", + "query": "" + }, + "visualization": { + "axisTitlesVisibilitySettings": { + "x": false, + "yLeft": false, + "yRight": true + }, + "fittingFunction": "Linear", + "gridlinesVisibilitySettings": { + "x": true, + "yLeft": true, + "yRight": true + }, + "layers": [ + { + "accessors": [ + "61256570-b7dd-4bec-b73d-d12d993ae091" + ], + "layerId": "f4259765-f8eb-47de-8472-a04528f8219e", + "layerType": "data", + "position": "top", + "seriesType": "line", + "showGridlines": false, + "splitAccessor": "18e2e114-77eb-4a85-afdb-ddd837e6f05a", + "xAccessor": "3153e211-b16a-4f92-b775-6d06a4edaf44" + } + ], + "legend": { + "isVisible": false, + "position": "bottom", + "showSingleSeries": false + }, + "preferredSeriesType": "line", + "tickLabelsVisibilitySettings": { + "x": true, + "yLeft": true, + "yRight": true + }, + "valueLabels": "hide", + "yLeftExtent": { + "lowerBound": 0, + "mode": 
"custom", + "upperBound": 1 + }, + "yRightExtent": { + "mode": "full" + } + } + }, + "title": "", + "type": "lens", + "visualizationType": "lnsXY" + }, + "enhancements": {}, + "hidePanelTitles": false + }, + "gridData": { + "h": 9, + "i": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "w": 12, + "x": 12, + "y": 21 + }, + "panelIndex": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "title": "CPU limit utilization by Pod", + "type": "lens", + "version": "7.16.2" }, { "embeddableConfig": { @@ -1132,7 +1281,7 @@ "panelIndex": "f3275c69-84ce-4c6d-bd49-3cd6f1c606f9", "title": "CPU usage per Node (seconds)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1331,7 +1480,7 @@ "panelIndex": "cadc827f-4efb-4045-b98b-7395264e4c16", "title": "Memory usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1485,7 +1634,7 @@ "panelIndex": "5551e0b7-722f-401c-90fd-a0094e919618", "title": "Memory usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1498,7 +1647,7 @@ }, { "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "name": "indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", "type": "index-pattern" } ], @@ -1506,14 +1655,25 @@ "datasourceStates": { "indexpattern": { "layers": { - "f4259765-f8eb-47de-8472-a04528f8219e": { + "3888dded-b04b-45ec-b466-c121715bc0c1": { "columnOrder": [ - "18e2e114-77eb-4a85-afdb-ddd837e6f05a", - "3153e211-b16a-4f92-b775-6d06a4edaf44", - "61256570-b7dd-4bec-b73d-d12d993ae091" + "18948f36-88ec-476d-8593-352b13485e53", + "118e3ebc-e414-495d-a99d-a356e436b074", + "98d45c49-c3b2-43ff-bf13-9b289ba154af" ], "columns": { - "18e2e114-77eb-4a85-afdb-ddd837e6f05a": { + "118e3ebc-e414-495d-a99d-a356e436b074": { + "dataType": "date", + "isBucketed": true, + "label": "@timestamp", + "operationType": "date_histogram", + "params": { + "interval": "60s" + }, + "scale": 
"interval", + "sourceField": "@timestamp" + }, + "18948f36-88ec-476d-8593-352b13485e53": { "dataType": "string", "isBucketed": true, "label": "Top values of gcp.labels.resource.pod_name", @@ -1521,42 +1681,32 @@ "params": { "missingBucket": false, "orderBy": { - "columnId": "61256570-b7dd-4bec-b73d-d12d993ae091", + "columnId": "98d45c49-c3b2-43ff-bf13-9b289ba154af", "type": "column" }, "orderDirection": "desc", "otherBucket": true, - "size": 100 + "size": 5 }, "scale": "ordinal", "sourceField": "gcp.labels.resource.pod_name" }, - "3153e211-b16a-4f92-b775-6d06a4edaf44": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "60s" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "61256570-b7dd-4bec-b73d-d12d993ae091": { + "98d45c49-c3b2-43ff-bf13-9b289ba154af": { + "customLabel": true, "dataType": "number", "isBucketed": false, - "label": "Median of gcp.gke.container.memory.limit_utilization.pct", + "label": "Median memory used", "operationType": "median", "params": { "format": { - "id": "percent", + "id": "bytes", "params": { - "decimals": 0 + "decimals": 2 } } }, "scale": "ratio", - "sourceField": "gcp.gke.container.memory.limit_utilization.pct" + "sourceField": "gcp.gke.container.memory.used.bytes" } }, "incompleteColumns": {} @@ -1584,15 +1734,15 @@ "layers": [ { "accessors": [ - "61256570-b7dd-4bec-b73d-d12d993ae091" + "98d45c49-c3b2-43ff-bf13-9b289ba154af" ], - "layerId": "f4259765-f8eb-47de-8472-a04528f8219e", + "layerId": "3888dded-b04b-45ec-b466-c121715bc0c1", "layerType": "data", "position": "top", "seriesType": "line", "showGridlines": false, - "splitAccessor": "18e2e114-77eb-4a85-afdb-ddd837e6f05a", - "xAccessor": "3153e211-b16a-4f92-b775-6d06a4edaf44" + "splitAccessor": "18948f36-88ec-476d-8593-352b13485e53", + "xAccessor": "118e3ebc-e414-495d-a99d-a356e436b074" } ], "legend": { @@ -1607,10 +1757,9 @@ "yRight": true }, "valueLabels": "hide", + 
"valuesInLegend": true, "yLeftExtent": { - "lowerBound": 0, - "mode": "custom", - "upperBound": 1 + "mode": "full" }, "yRightExtent": { "mode": "full" @@ -1626,15 +1775,15 @@ }, "gridData": { "h": 9, - "i": "ce28d9df-4506-4020-b2de-6274ac0d46b7", + "i": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", "w": 12, "x": 12, - "y": 21 + "y": 30 }, - "panelIndex": "ce28d9df-4506-4020-b2de-6274ac0d46b7", - "title": "CPU limit utilization by Pod", + "panelIndex": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", + "title": "Memory usage by Pod", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1848,7 +1997,7 @@ "panelIndex": "a1714037-fe75-468b-bfea-d4a8e1769cbf", "title": "Network traffic (bytes count)", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -1901,7 +2050,7 @@ "label": "Media received bytes count", "operationType": "median", "scale": "ratio", - "sourceField": "gcp.gke.node.network.received_bytes.count" + "sourceField": "gcp.gke.node.network.received.bytes" } }, "incompleteColumns": {} @@ -1950,7 +2099,7 @@ "panelIndex": "f09d76ba-490b-4392-b6cb-051e4fcc03c9", "title": "Inbound network traffic top nodes", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2118,7 +2267,7 @@ "panelIndex": "eea592ea-3598-4104-96b6-ea33a0d9845d", "title": "Storage requests", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2131,7 +2280,7 @@ }, { "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", + "name": "indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", "type": "index-pattern" } ], @@ -2139,14 +2288,30 @@ "datasourceStates": { "indexpattern": { "layers": { - "3888dded-b04b-45ec-b466-c121715bc0c1": { + "abc1288b-8de8-4cd4-ae39-c2d9c700396e": { "columnOrder": [ - "18948f36-88ec-476d-8593-352b13485e53", - "118e3ebc-e414-495d-a99d-a356e436b074", - 
"98d45c49-c3b2-43ff-bf13-9b289ba154af" + "d40af55e-4c31-4a17-b71f-63ee7e598131", + "b47b55db-eb5b-4a10-8c49-c6920135fedf", + "374bc09a-188c-4cdf-b993-b31eb3754e46" ], "columns": { - "118e3ebc-e414-495d-a99d-a356e436b074": { + "374bc09a-188c-4cdf-b993-b31eb3754e46": { + "dataType": "number", + "isBucketed": false, + "label": "Maximum of gcp.gke.pod.volume.utilization.pct", + "operationType": "max", + "params": { + "format": { + "id": "percent", + "params": { + "decimals": 2 + } + } + }, + "scale": "ratio", + "sourceField": "gcp.gke.pod.volume.utilization.pct" + }, + "b47b55db-eb5b-4a10-8c49-c6920135fedf": { "dataType": "date", "isBucketed": true, "label": "@timestamp", @@ -2157,7 +2322,7 @@ "scale": "interval", "sourceField": "@timestamp" }, - "18948f36-88ec-476d-8593-352b13485e53": { + "d40af55e-4c31-4a17-b71f-63ee7e598131": { "dataType": "string", "isBucketed": true, "label": "Top values of gcp.labels.resource.pod_name", @@ -2165,7 +2330,7 @@ "params": { "missingBucket": false, "orderBy": { - "columnId": "98d45c49-c3b2-43ff-bf13-9b289ba154af", + "columnId": "374bc09a-188c-4cdf-b993-b31eb3754e46", "type": "column" }, "orderDirection": "desc", @@ -2174,23 +2339,6 @@ }, "scale": "ordinal", "sourceField": "gcp.labels.resource.pod_name" - }, - "98d45c49-c3b2-43ff-bf13-9b289ba154af": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Median memory used", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "gcp.gke.container.memory.used.bytes" } }, "incompleteColumns": {} @@ -2218,15 +2366,14 @@ "layers": [ { "accessors": [ - "98d45c49-c3b2-43ff-bf13-9b289ba154af" + "374bc09a-188c-4cdf-b993-b31eb3754e46" ], - "layerId": "3888dded-b04b-45ec-b466-c121715bc0c1", + "layerId": "abc1288b-8de8-4cd4-ae39-c2d9c700396e", "layerType": "data", - "position": "top", "seriesType": "line", - "showGridlines": false, - "splitAccessor": 
"18948f36-88ec-476d-8593-352b13485e53", - "xAccessor": "118e3ebc-e414-495d-a99d-a356e436b074" + "splitAccessor": "d40af55e-4c31-4a17-b71f-63ee7e598131", + "xAccessor": "b47b55db-eb5b-4a10-8c49-c6920135fedf", + "yConfig": [] } ], "legend": { @@ -2259,15 +2406,15 @@ }, "gridData": { "h": 9, - "i": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", + "i": "17973ffd-05c1-4075-97eb-4990a3e9b61e", "w": 12, "x": 12, - "y": 30 + "y": 39 }, - "panelIndex": "610b04b2-7483-4995-b3f6-a11c09c2b2f2", - "title": "Memory usage by Pod", + "panelIndex": "17973ffd-05c1-4075-97eb-4990a3e9b61e", + "title": "Volume utilization by Pod", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2421,7 +2568,7 @@ "panelIndex": "4611dd47-5619-485a-8a19-8edcc37d2f4e", "title": "Ephemeral storage usage", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2571,154 +2718,7 @@ "panelIndex": "fe426912-61f1-4913-81d1-1dc734c50111", "title": "Ephemeral storage usage by Node", "type": "lens", - "version": "7.15.0" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "abc1288b-8de8-4cd4-ae39-c2d9c700396e": { - "columnOrder": [ - "d40af55e-4c31-4a17-b71f-63ee7e598131", - "b47b55db-eb5b-4a10-8c49-c6920135fedf", - "374bc09a-188c-4cdf-b993-b31eb3754e46" - ], - "columns": { - "374bc09a-188c-4cdf-b993-b31eb3754e46": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of gcp.gke.pod.volume.utilization.pct", - "operationType": "max", - "params": { - "format": { - "id": "percent", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": 
"gcp.gke.pod.volume.utilization.pct" - }, - "b47b55db-eb5b-4a10-8c49-c6920135fedf": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "60s" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "d40af55e-4c31-4a17-b71f-63ee7e598131": { - "dataType": "string", - "isBucketed": true, - "label": "Top values of gcp.labels.resource.pod_name", - "operationType": "terms", - "params": { - "missingBucket": false, - "orderBy": { - "columnId": "374bc09a-188c-4cdf-b993-b31eb3754e46", - "type": "column" - }, - "orderDirection": "desc", - "otherBucket": true, - "size": 5 - }, - "scale": "ordinal", - "sourceField": "gcp.labels.resource.pod_name" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": false, - "yLeft": false, - "yRight": true - }, - "fittingFunction": "Linear", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "layers": [ - { - "accessors": [ - "374bc09a-188c-4cdf-b993-b31eb3754e46" - ], - "layerId": "abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "layerType": "data", - "seriesType": "line", - "splitAccessor": "d40af55e-4c31-4a17-b71f-63ee7e598131", - "xAccessor": "b47b55db-eb5b-4a10-8c49-c6920135fedf", - "yConfig": [] - } - ], - "legend": { - "isVisible": false, - "position": "bottom", - "showSingleSeries": false - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 9, - "i": "17973ffd-05c1-4075-97eb-4990a3e9b61e", - "w": 12, - "x": 12, - "y": 39 - }, - 
"panelIndex": "17973ffd-05c1-4075-97eb-4990a3e9b61e", - "title": "Volume utilization by Pod", - "type": "lens", - "version": "7.15.0" + "version": "7.16.2" }, { "embeddableConfig": { @@ -2853,17 +2853,17 @@ }, "panelIndex": "6f9ca350-a898-4865-8aef-4b593bd341ff", "type": "lens", - "version": "7.15.0" + "version": "7.16.2" } ], "timeRestore": false, "title": "[Metricbeat GCP] GKE Overview", "version": 1 }, - "coreMigrationVersion": "7.15.0", + "coreMigrationVersion": "7.16.2", "id": "1ae960c0-f9f8-11eb-bc38-79936db7c106", "migrationVersion": { - "dashboard": "7.15.0" + "dashboard": "7.16.0" }, "references": [ { @@ -2966,6 +2966,16 @@ "name": "1df880b1-44bc-468b-aba7-0cec27b74b12:indexpattern-datasource-layer-0599ce9e-3c12-4f89-af4e-d094e9f68ea9", "type": "index-pattern" }, + { + "id": "metricbeat-*", + "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-current-indexpattern", + "type": "index-pattern" + }, + { + "id": "metricbeat-*", + "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "type": "index-pattern" + }, { "id": "metricbeat-*", "name": "f3275c69-84ce-4c6d-bd49-3cd6f1c606f9:indexpattern-datasource-current-indexpattern", @@ -3003,12 +3013,12 @@ }, { "id": "metricbeat-*", - "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-current-indexpattern", + "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-current-indexpattern", "type": "index-pattern" }, { "id": "metricbeat-*", - "name": "ce28d9df-4506-4020-b2de-6274ac0d46b7:indexpattern-datasource-layer-f4259765-f8eb-47de-8472-a04528f8219e", + "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", "type": "index-pattern" }, { @@ -3043,12 +3053,12 @@ }, { "id": "metricbeat-*", - "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-current-indexpattern", + "name": 
"17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-current-indexpattern", "type": "index-pattern" }, { "id": "metricbeat-*", - "name": "610b04b2-7483-4995-b3f6-a11c09c2b2f2:indexpattern-datasource-layer-3888dded-b04b-45ec-b466-c121715bc0c1", + "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", "type": "index-pattern" }, { @@ -3071,16 +3081,6 @@ "name": "fe426912-61f1-4913-81d1-1dc734c50111:indexpattern-datasource-layer-98ee5c53-f8e5-43ed-91a8-507010e5b0a9", "type": "index-pattern" }, - { - "id": "metricbeat-*", - "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "17973ffd-05c1-4075-97eb-4990a3e9b61e:indexpattern-datasource-layer-abc1288b-8de8-4cd4-ae39-c2d9c700396e", - "type": "index-pattern" - }, { "id": "metricbeat-*", "name": "6f9ca350-a898-4865-8aef-4b593bd341ff:indexpattern-datasource-current-indexpattern", @@ -3093,6 +3093,6 @@ } ], "type": "dashboard", - "updated_at": "2021-08-26T15:37:59.949Z", - "version": "WzIxODAsMV0=" + "updated_at": "2022-01-19T14:08:38.278Z", + "version": "WzIyMTYsMV0=" } \ No newline at end of file From b55f1ceac49d295c6050c02e336b56911f580abd Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 21 Jan 2022 02:27:33 -0500 Subject: [PATCH 23/30] [Automation] Update elastic stack version to 7.17.0-1bd58b32 for testing (#29938) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 0e476ca5829..7a36eb99fde 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: 
docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-e1efbe3a-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: docker.elastic.co/logstash/logstash-oss:7.17.0-e1efbe3a-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-e1efbe3a-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 39d8152b061..eaa0f11e475 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-e1efbe3a-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-e1efbe3a-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-e1efbe3a-SNAPSHOT + image: 
docker.elastic.co/kibana/kibana:7.17.0-1bd58b32-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 05a46aca38a77a25949764182a2f6933bd6a6f35 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 24 Jan 2022 02:12:47 -0500 Subject: [PATCH 24/30] [Automation] Update elastic stack version to 7.17.0-ab4975a2 for testing (#29956) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 7a36eb99fde..ee3eb76b1b8 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -17,7 +17,7 @@ services: - "indices.id_field_data.enabled=true" logstash: - image: docker.elastic.co/logstash/logstash-oss:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -27,7 +27,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 
eaa0f11e475..ffd4c748b9f 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -22,7 +22,7 @@ services: - "ingest.geoip.downloader.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -32,7 +32,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.17.0-1bd58b32-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.17.0-ab4975a2-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From cd16c9fa79eae6095ea68bf7d14b135655d4b85f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 24 Jan 2022 19:17:59 +1030 Subject: [PATCH 25/30] x-pack/auditbeat/module/system/socket: get full length path and arg from /proc when not available from kprobe (#29410) (#29958) Also use first arg from sysinfo.Processes in place of Name to avoid process name truncation. 
(cherry picked from commit d46bb5f0cd6e84faa54c66138848aff46f2417f9) Co-authored-by: Dan Kortschak <90160302+efd6@users.noreply.github.com> --- CHANGELOG.next.asciidoc | 5 ++ .../auditbeat/module/system/socket/events.go | 76 ++++++++++++++----- .../module/system/socket/socket_linux.go | 9 +++ 3 files changed, 73 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index c858560755b..61b715e9f4f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -44,6 +44,11 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Auditbeat* - system/socket: Fix startup errors on newer 5.x kernels due to missing _do_fork function. {issue}29607[29607] {pull}29744[29744] +- system/package: Fix parsing of Installed-Size field of DEB packages. {issue}16661[16661] {pull}17188[17188] +- system module: Fix panic during initialisation when /proc/stat can't be read. {pull}17569[17569] +- system/package: Fix an error that can occur while trying to persist package metadata. {issue}18536[18536] {pull}18887[18887] +- system/socket: Fix bugs leading to wrong process being attributed to flows. {pull}29166[29166] {issue}17165[17165] +- system/socket: Fix process name and arg truncation for long names, paths and args lists. {issue}24667[24667] {pull}29410[29410] *Filebeat* diff --git a/x-pack/auditbeat/module/system/socket/events.go b/x-pack/auditbeat/module/system/socket/events.go index d0c37701d54..74b31cc7e7f 100644 --- a/x-pack/auditbeat/module/system/socket/events.go +++ b/x-pack/auditbeat/module/system/socket/events.go @@ -859,8 +859,10 @@ func (e *inetReleaseCall) Update(s *state) error { // kernels it needs to dump fixed-size arrays in 8-byte chunks. As the total // number of fetchargs available is limited, we have to dump only the first // 128 bytes of every argument. 
-const maxProgArgLen = 128 -const maxProgArgs = 5 +const ( + maxProgArgLen = 128 + maxProgArgs = 5 +) type execveCall struct { Meta tracing.Metadata `kprobe:"metadata"` @@ -880,38 +882,78 @@ type execveCall struct { func (e *execveCall) getProcess() *process { p := &process{ pid: e.Meta.PID, - path: readCString(e.Path[:]), created: kernelTime(e.Meta.Timestamp), } - p.name = filepath.Base(p.path) - var argc int - for argc = 0; argc <= maxProgArgs; argc++ { - if e.Ptrs[argc] == 0 { - break + + if idx := bytes.IndexByte(e.Path[:], 0); idx >= 0 { + // Fast path if we already have the path. + p.path = string(e.Path[:idx]) + } else { + // Attempt to get the path from the /prox//exe symlink. + var err error + p.path, err = filepath.EvalSymlinks(fmt.Sprintf("/proc/%d/exe", e.Meta.PID)) + if err != nil { + if pe, ok := err.(*os.PathError); ok && strings.Contains(pe.Path, "(deleted)") { + // Keep the deleted path from the PathError. + p.path = pe.Path + } else { + // Fallback to the truncated path. + p.path = string(e.Path[:]) + " ..." + } } } - p.args = make([]string, argc) - params := [maxProgArgs][]byte{ + + // Check for truncation of arg list or arguments. + params := [...][]byte{ e.Param0[:], e.Param1[:], e.Param2[:], e.Param3[:], e.Param4[:], } - limit := argc - if limit > maxProgArgs { - limit = maxProgArgs - p.args[limit] = "..." + var ( + argc int + truncatedArg bool + ) + for argc = 0; argc < len(e.Ptrs); argc++ { + if e.Ptrs[argc] == 0 { + break + } + if argc < len(params) && bytes.IndexByte(params[argc], 0) < 0 { + truncatedArg = true + } + } + if argc > maxProgArgs || truncatedArg { + // Attempt to get complete args list from /proc//cmdline. + cmdline, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", e.Meta.PID)) + if err == nil { + p.args = strings.Split(strings.TrimRight(string(cmdline), "\x00"), "\x00") + } } - for i := 0; i < limit; i++ { - p.args[i] = readCString(params[i]) + + if p.args == nil { + // Fallback to arg list if unsuccessful or no truncation. 
+ p.args = make([]string, argc) + if argc > maxProgArgs { + argc = maxProgArgs + p.args[argc] = "..." + } + for i, par := range params[:argc] { + p.args[i] = readCString(par) + } } - if p.hasCreds = e.creds != nil; p.hasCreds { + + // Get name from first argument. + p.name = filepath.Base(p.args[0]) + + if e.creds != nil { + p.hasCreds = true p.uid = e.creds.UID p.gid = e.creds.GID p.euid = e.creds.EUID p.egid = e.creds.EGID } + return p } diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go index e7f1a059dd1..fb66f717d84 100644 --- a/x-pack/auditbeat/module/system/socket/socket_linux.go +++ b/x-pack/auditbeat/module/system/socket/socket_linux.go @@ -187,6 +187,15 @@ func (m *MetricSet) Run(r mb.PushReporterV2) { } else { for _, p := range procs { if i, err := p.Info(); err == nil { + if len(i.Name) == 16 && len(i.Args) != 0 { + // github.com/prometheus/procfs uses /proc//stat for + // the process name which is truncated to 16 bytes, so get + // the name from the cmdline data if it might be truncated. + // The guard for length of i.Args is for cases where there + // is no command line reported by proc fs; this should never + // happen, but does. 
+ i.Name = filepath.Base(i.Args[0]) + } process := &process{ name: i.Name, pid: uint32(i.PID), From c6bec41fa8d1fd1447a299d717412f8e07024671 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 24 Jan 2022 21:34:17 +0000 Subject: [PATCH 26/30] ci: docker login step for pulling then pushing (#29960) (#29963) (cherry picked from commit 02c9bef29564fbb358832d6bf9a5182d32f365b3) Co-authored-by: Victor Martinez --- .ci/packaging.groovy | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 6763c84fd42..f51976dae9d 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -307,8 +307,6 @@ def tagAndPush(Map args = [:]) { tagName = "pr-${env.CHANGE_ID}" } - dockerLogin(secret: "${DOCKERELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}") - // supported tags def tags = [tagName, "${env.GIT_BASE_COMMIT}"] if (!isPR() && aliasVersion != "") { @@ -380,6 +378,7 @@ def release(){ withEnv([ "DEV=true" ]) { + dockerLogin(secret: "${DOCKERELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}") dir("${env.BEATS_FOLDER}") { sh(label: "Release ${env.BEATS_FOLDER} ${env.PLATFORMS}", script: 'mage package') } From 599f0c3d57933df1174de21a20e7cfb8186d6a89 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 24 Jan 2022 18:25:34 -0500 Subject: [PATCH 27/30] Improve aws-s3 gzip file detection to avoid false negatives (#29969) (#29974) Directly check the byte stream for the gzip magic number and deflate compression type. Avoid using http.DetectContentType because it returns the first match it finds while checking many signatures. 
Closes #29968 (cherry picked from commit 61a7d368e8d89b28b5430785bc51ce4cb25afb7b) Co-authored-by: Andrew Kroh --- CHANGELOG.next.asciidoc | 6 ++++++ x-pack/filebeat/input/awss3/s3_objects.go | 12 +++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 61b715e9f4f..0c5c55ce782 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -53,6 +53,12 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Filebeat* - aws-s3: Stop trying to increase SQS message visibility after ReceiptHandleIsInvalid errors. {pull}29480[29480] +- Fix handling of IPv6 addresses in netflow flow events. {issue}19210[19210] {pull}29383[29383] +- Fix `sophos` KV splitting and syslog header handling {issue}24237[24237] {pull}29331[29331] +- Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] +- Make Cisco ASA and FTD modules conform to the ECS definition for event.outcome and event.type. {issue}29581[29581] {pull}29698[29698] +- ibmmq: Fixed `@timestamp` not being populated with correct values. {pull}29773[29773] +- aws-s3: Improve gzip detection to avoid false negatives. {issue}29968[29968] *Heartbeat* diff --git a/x-pack/filebeat/input/awss3/s3_objects.go b/x-pack/filebeat/input/awss3/s3_objects.go index ca5af007292..bcc6138cb3e 100644 --- a/x-pack/filebeat/input/awss3/s3_objects.go +++ b/x-pack/filebeat/input/awss3/s3_objects.go @@ -15,7 +15,6 @@ import ( "fmt" "io" "io/ioutil" - "net/http" "reflect" "strings" "time" @@ -375,18 +374,13 @@ func s3ObjectHash(obj s3EventV2) string { // stream without consuming it. This makes it convenient for code executed after this function call // to consume the stream if it wants. func isStreamGzipped(r *bufio.Reader) (bool, error) { - // Why 512? 
See https://godoc.org/net/http#DetectContentType - buf, err := r.Peek(512) + buf, err := r.Peek(3) if err != nil && err != io.EOF { return false, err } - switch http.DetectContentType(buf) { - case "application/x-gzip", "application/zip": - return true, nil - default: - return false, nil - } + // gzip magic number (1f 8b) and the compression method (08 for DEFLATE). + return bytes.HasPrefix(buf, []byte{0x1F, 0x8B, 0x08}), nil } // s3Metadata returns a map containing the selected S3 object metadata keys. From 85a8e246ded1e4c68eb422694957969cc353c5e9 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 25 Jan 2022 13:58:33 +0100 Subject: [PATCH 28/30] Add clarification about enableing dashboard loading (#29985) (#29989) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit (cherry picked from commit a84302d99b7be0824eed8d4edcbd550709011f68) Co-authored-by: Noémi Ványi --- libbeat/docs/dashboardsconfig.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libbeat/docs/dashboardsconfig.asciidoc b/libbeat/docs/dashboardsconfig.asciidoc index 7bed3d256f8..d720d7e8f7e 100644 --- a/libbeat/docs/dashboardsconfig.asciidoc +++ b/libbeat/docs/dashboardsconfig.asciidoc @@ -46,6 +46,9 @@ You can specify the following options in the `setup.dashboards` section of the If this option is set to true, {beatname_uc} loads the sample Kibana dashboards from the local `kibana` directory in the home path of the {beatname_uc} installation. +NOTE: {beatname_uc} loads dashboards on startup if either `enabled` is set to `true` +or the `setup.dashboards` section is included in the configuration. + NOTE: When dashboard loading is enabled, {beatname_uc} overwrites any existing dashboards that match the names of the dashboards you are loading. This happens every time {beatname_uc} starts. 
From 2eede7c0b180e6852fdc41bef62b887207d7ca31 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 25 Jan 2022 07:32:22 -0600 Subject: [PATCH 29/30] [Heartbeat] Change size of data on ICMP packet (#29948) (#29978) * [Heartbeat] Change size of data on ICMP packet * Add CHANGELOG entry. Co-authored-by: Guillaume Marsay Co-authored-by: Justin Kambic (cherry picked from commit da9720cca91f0f22c190ddd9514a950cdb37806b) Co-authored-by: Guillaume Marsay --- CHANGELOG.next.asciidoc | 1 + heartbeat/monitors/active/icmp/stdloop.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0c5c55ce782..1d6bc17d729 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -22,6 +22,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix broken macOS ICMP python e2e test. {pull}29900[29900] - Only add monitor.status to browser events when summary. {pull}29460[29460] - Also add summary to journeys for which the synthetics runner crashes. {pull}29606[29606] +- Update size of ICMP packets to adhere to standard min size. 
{pull}29948[29948] *Metricbeat* diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go index 9f5f5543967..6f76b256353 100644 --- a/heartbeat/monitors/active/icmp/stdloop.go +++ b/heartbeat/monitors/active/icmp/stdloop.go @@ -346,7 +346,7 @@ func (l *stdICMPLoop) sendEchoRequest(addr *net.IPAddr) (*requestContext, error) l.requests[id] = ctx l.mutex.Unlock() - payloadBuf := make([]byte, 0, 8) + payloadBuf := make([]byte, 48, 48) payload := bytes.NewBuffer(payloadBuf) ts := time.Now() binary.Write(payload, binary.BigEndian, ts.UnixNano()) From ca2bd68e88b9ed87907e52d5d68bd92e70c6198f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 25 Jan 2022 15:24:19 -0500 Subject: [PATCH 30/30] [7.17](backport #29966) Add the Elastic product origin header when talking to Elasticsearch or Kibana. (#30000) Add the Elastic product origin header when talking to Elasticsearch or Kibana. (#29966) Set the beats product origin header by default when communicating with Elasticsearch or Kibana. 
(cherry picked from commit 5f3dd3e39deb2f062a5b051d92203d2444366825) # Conflicts: # metricbeat/module/kibana/settings/settings.go # metricbeat/module/kibana/stats/stats.go # metricbeat/module/kibana/status/status.go Co-authored-by: Craig MacKenzie --- libbeat/common/productorigin/productorigin.go | 29 +++++++++++++++++++ libbeat/esleg/eslegclient/connection.go | 22 +++++++------- libbeat/esleg/eslegclient/connection_test.go | 21 +++++++++----- metricbeat/module/elasticsearch/metricset.go | 3 ++ metricbeat/module/kibana/stats/stats.go | 3 ++ metricbeat/module/kibana/status/status.go | 3 ++ 6 files changed, 63 insertions(+), 18 deletions(-) create mode 100644 libbeat/common/productorigin/productorigin.go diff --git a/libbeat/common/productorigin/productorigin.go b/libbeat/common/productorigin/productorigin.go new file mode 100644 index 00000000000..133442fae90 --- /dev/null +++ b/libbeat/common/productorigin/productorigin.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package productorigin defines the Elastic product origin header. +package productorigin + +const ( + // Identifies a request as originating from an Elastic product. 
Has the side effect of + // suppressing Elasticsearch API deprecation warnings in Kibana when set. + Header = "X-Elastic-Product-Origin" + + // Applicable values from https://github.com/elastic/kibana/blob/main/x-pack/plugins/upgrade_assistant/common/constants.ts#L50 + Observability = "observability" + Beats = "beats" +) diff --git a/libbeat/esleg/eslegclient/connection.go b/libbeat/esleg/eslegclient/connection.go index fdba8b27744..9cd4b94f563 100644 --- a/libbeat/esleg/eslegclient/connection.go +++ b/libbeat/esleg/eslegclient/connection.go @@ -30,6 +30,7 @@ import ( "go.elastic.co/apm/module/apmelasticsearch" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/libbeat/common/transport" "github.com/elastic/beats/v7/libbeat/common/transport/httpcommon" "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" @@ -84,7 +85,9 @@ type ConnectionSettings struct { func NewConnection(s ConnectionSettings) (*Connection, error) { logger := logp.NewLogger("esclientleg") - s = settingsWithDefaults(s) + if s.IdleConnTimeout == 0 { + s.IdleConnTimeout = 1 * time.Minute + } u, err := url.Parse(s.URL) if err != nil { @@ -117,6 +120,14 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { } userAgent := useragent.UserAgent(s.Beatname, true) + // Default the product origin header to beats if it wasn't already set. 
+ if _, ok := s.Headers[productorigin.Header]; !ok { + if s.Headers == nil { + s.Headers = make(map[string]string) + } + s.Headers[productorigin.Header] = productorigin.Beats + } + httpClient, err := s.Transport.Client( httpcommon.WithLogger(logger), httpcommon.WithIOStats(s.Observer), @@ -155,15 +166,6 @@ func NewConnection(s ConnectionSettings) (*Connection, error) { return &conn, nil } -func settingsWithDefaults(s ConnectionSettings) ConnectionSettings { - settings := s - if settings.IdleConnTimeout == 0 { - settings.IdleConnTimeout = 1 * time.Minute - } - - return settings -} - // NewClients returns a list of Elasticsearch clients based on the given // configuration. It accepts the same configuration parameters as the Elasticsearch // output, except for the output specific configuration options. If multiple hosts diff --git a/libbeat/esleg/eslegclient/connection_test.go b/libbeat/esleg/eslegclient/connection_test.go index e0735ebe992..af553d71c09 100644 --- a/libbeat/esleg/eslegclient/connection_test.go +++ b/libbeat/esleg/eslegclient/connection_test.go @@ -25,6 +25,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/elastic/beats/v7/libbeat/common/productorigin" ) func TestAPIKeyEncoding(t *testing.T) { @@ -71,18 +73,21 @@ func TestHeaders(t *testing.T) { expected map[string][]string }{ {input: map[string]string{ - "Accept": "application/vnd.elasticsearch+json;compatible-with=7", - "Content-Type": "application/vnd.elasticsearch+json;compatible-with=7", - "X-My-Header": "true"}, + "Accept": "application/vnd.elasticsearch+json;compatible-with=7", + "Content-Type": "application/vnd.elasticsearch+json;compatible-with=7", + productorigin.Header: "elastic-product", + "X-My-Header": "true"}, expected: map[string][]string{ - "Accept": {"application/vnd.elasticsearch+json;compatible-with=7"}, - "Content-Type": {"application/vnd.elasticsearch+json;compatible-with=7"}, - "X-My-Header": {"true"}}}, + "Accept": 
{"application/vnd.elasticsearch+json;compatible-with=7"}, + "Content-Type": {"application/vnd.elasticsearch+json;compatible-with=7"}, + productorigin.Header: {"elastic-product"}, + "X-My-Header": {"true"}}}, {input: map[string]string{ "X-My-Header": "true"}, expected: map[string][]string{ - "Accept": {"application/json"}, - "X-My-Header": {"true"}}}, + "Accept": {"application/json"}, + productorigin.Header: {productorigin.Beats}, + "X-My-Header": {"true"}}}, } { conn, err := NewConnection(ConnectionSettings{ Headers: td.input, diff --git a/metricbeat/module/elasticsearch/metricset.go b/metricbeat/module/elasticsearch/metricset.go index 22b4b2c6c49..79e1143df63 100644 --- a/metricbeat/module/elasticsearch/metricset.go +++ b/metricbeat/module/elasticsearch/metricset.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -83,6 +84,8 @@ func NewMetricSet(base mb.BaseMetricSet, servicePath string) (*MetricSet, error) return nil, err } + http.SetHeaderDefault(productorigin.Header, productorigin.Beats) + config := struct { XPack bool `config:"xpack.enabled"` Scope Scope `config:"scope"` diff --git a/metricbeat/module/kibana/stats/stats.go b/metricbeat/module/kibana/stats/stats.go index 0335e814fd4..60d91a76bbc 100644 --- a/metricbeat/module/kibana/stats/stats.go +++ b/metricbeat/module/kibana/stats/stats.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -106,6 +107,8 @@ func (m *MetricSet) init() error { return err } + statsHTTP.SetHeaderDefault(productorigin.Header, productorigin.Beats) + kibanaVersion, err := kibana.GetVersion(statsHTTP, statsPath) if err != nil { 
return err diff --git a/metricbeat/module/kibana/status/status.go b/metricbeat/module/kibana/status/status.go index c386ccb0010..5ed597d9edc 100644 --- a/metricbeat/module/kibana/status/status.go +++ b/metricbeat/module/kibana/status/status.go @@ -20,6 +20,7 @@ package status import ( "fmt" + "github.com/elastic/beats/v7/libbeat/common/productorigin" "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -65,6 +66,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } + http.SetHeaderDefault(productorigin.Header, productorigin.Beats) + return &MetricSet{ ms, http,