From ac00dd28136351902851fbaa057a706e282c536f Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 15 Oct 2020 07:16:06 -0600 Subject: [PATCH 01/93] Disable TestFetch in s3_request_integration_test.go (#21828) * Disable TestFetch in s3_request_integration_test.go * remove debug leftover --- .../aws/s3_daily_storage/s3_daily_storage_integration_test.go | 1 - .../module/aws/s3_request/s3_request_integration_test.go | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/metricbeat/module/aws/s3_daily_storage/s3_daily_storage_integration_test.go b/x-pack/metricbeat/module/aws/s3_daily_storage/s3_daily_storage_integration_test.go index 4042f332b8e..a398926b462 100644 --- a/x-pack/metricbeat/module/aws/s3_daily_storage/s3_daily_storage_integration_test.go +++ b/x-pack/metricbeat/module/aws/s3_daily_storage/s3_daily_storage_integration_test.go @@ -33,7 +33,6 @@ func TestFetch(t *testing.T) { mtest.CheckEventField("aws.dimensions.StorageType", "string", event, t) mtest.CheckEventField("aws.s3.metrics.BucketSizeBytes.avg", "float", event, t) mtest.CheckEventField("aws.s3.metrics.NumberOfObjects.avg", "float", event, t) - break } } diff --git a/x-pack/metricbeat/module/aws/s3_request/s3_request_integration_test.go b/x-pack/metricbeat/module/aws/s3_request/s3_request_integration_test.go index 8103acd13a4..c7b37de2af3 100644 --- a/x-pack/metricbeat/module/aws/s3_request/s3_request_integration_test.go +++ b/x-pack/metricbeat/module/aws/s3_request/s3_request_integration_test.go @@ -17,6 +17,7 @@ import ( ) func TestFetch(t *testing.T) { + t.Skip("flaky test: https://github.com/elastic/beats/issues/21826") config := mtest.GetConfigForTest(t, "s3_request", "60s") metricSet := mbtest.NewReportingMetricSetV2Error(t, config) From 8de1a7d9a93919a1236466749620546352d9029d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Thu, 15 Oct 2020 16:52:51 +0200 Subject: [PATCH 02/93] chore: simplify triggering the E2E tests for Beats (#21790) * chore: pass beat as a method argument (no side-effects) * chore: run tests in a separate stage * fix: use parenthesis * chore: update comment * chore: do not trigger E2E tests if no suite was added * fix: use missing curly brackets * fix: wrong closure wrapping * fix: condition was not set --- .ci/packaging.groovy | 80 +++++++++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 24 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 37eeaa7d223..f8a64f525c6 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -2,6 +2,13 @@ @Library('apm@current') _ +import groovy.transform.Field + +/** + This is required to store the test suites we will use to trigger the E2E tests. 
+*/ +@Field def e2eTestSuites = [] + pipeline { agent none environment { @@ -121,7 +128,7 @@ pipeline { release() pushCIDockerImages() } - runE2ETestForPackages() + prepareE2ETestForPackage("${BEATS_FOLDER}") } } stage('Package Mac OS'){ @@ -152,6 +159,13 @@ pipeline { } } } + stage('Run E2E Tests for Packages'){ + agent { label 'ubuntu && immutable' } + options { skipDefaultCheckout() } + steps { + runE2ETests() + } + } } } } @@ -208,7 +222,7 @@ def tagAndPush(name){ def commitName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${env.GIT_BASE_COMMIT}" def iterations = 0 - retryWithSleep(retries: 3, seconds: 5, backoff: true) + retryWithSleep(retries: 3, seconds: 5, backoff: true) { iterations++ def status = sh(label:'Change tag and push', script: """ docker tag ${oldName} ${newName} @@ -217,30 +231,27 @@ def tagAndPush(name){ docker push ${commitName} """, returnStatus: true) - if ( status > 0 && iterations < 3) { - error('tag and push failed, retry') - } else if ( status > 0 ) { - log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") + if ( status > 0 && iterations < 3) { + error('tag and push failed, retry') + } else if ( status > 0 ) { + log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") + } } } } -def runE2ETestForPackages(){ - def suite = '' - - catchError(buildResult: 'UNSTABLE', message: 'Unable to run e2e tests', stageResult: 'FAILURE') { - if ("${env.BEATS_FOLDER}" == "filebeat" || "${env.BEATS_FOLDER}" == "x-pack/filebeat") { - suite = 'helm,fleet' - } else if ("${env.BEATS_FOLDER}" == "metricbeat" || "${env.BEATS_FOLDER}" == "x-pack/metricbeat") { - suite = '' - } else if ("${env.BEATS_FOLDER}" == "x-pack/elastic-agent") { - suite = 'fleet' - } else { - echo("Skipping E2E tests for ${env.BEATS_FOLDER}.") - return - } - - triggerE2ETests(suite) +def prepareE2ETestForPackage(String beat){ + if ("${beat}" == "filebeat" || "${beat}" == "x-pack/filebeat") { + e2eTestSuites.push('fleet') + e2eTestSuites.push('helm') + } else if ("${beat}" == "metricbeat" || "${beat}" == "x-pack/metricbeat") { + e2eTestSuites.push('ALL') + echo("${beat} adds all test suites to the E2E tests job.") + } else if ("${beat}" == "x-pack/elastic-agent") { + e2eTestSuites.push('fleet') + } else { + echo("${beat} does not add any test suite to the E2E tests job.") + return } } @@ -257,8 +268,29 @@ def release(){ } } +def runE2ETests(){ + if (e2eTestSuites.size() == 0) { + echo("Not triggering E2E tests for PR-${env.CHANGE_ID} because the changes does not affect the E2E.") + return + } + + def suites = '' // empty value represents all suites in the E2E tests + + catchError(buildResult: 'UNSTABLE', message: 'Unable to run e2e tests', stageResult: 'FAILURE') { + def suitesSet = e2eTestSuites.toSet() + + if (!suitesSet.contains('ALL')) { + suitesSet.each { suite -> + suites += "${suite}," + }; + } + + triggerE2ETests(suites) + } +} + def triggerE2ETests(String suite) { - echo("Triggering E2E tests for ${env.BEATS_FOLDER}. Test suite: ${suite}.") + echo("Triggering E2E tests for PR-${env.CHANGE_ID}. Test suites: ${suite}.") def branchName = isPR() ? 
"${env.CHANGE_TARGET}" : "${env.JOB_BASE_NAME}" def e2eTestsPipeline = "e2e-tests/e2e-testing-mbp/${branchName}" @@ -285,7 +317,7 @@ def triggerE2ETests(String suite) { wait: false ) - def notifyContext = "${env.GITHUB_CHECK_E2E_TESTS_NAME} for ${env.BEATS_FOLDER}" + def notifyContext = "${env.GITHUB_CHECK_E2E_TESTS_NAME}" githubNotify(context: "${notifyContext}", description: "${notifyContext} ...", status: 'PENDING', targetUrl: "${env.JENKINS_URL}search/?q=${e2eTestsPipeline.replaceAll('/','+')}") } From 4e06214b4947d19a6b7d41b21995024d22f18111 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 15 Oct 2020 16:23:53 +0100 Subject: [PATCH 03/93] [test] disable elasticsearch_kerberos.elastic container (#21846) --- libbeat/docker-compose.yml | 62 ++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/libbeat/docker-compose.yml b/libbeat/docker-compose.yml index ebd23373325..c96b40e3ea8 100644 --- a/libbeat/docker-compose.yml +++ b/libbeat/docker-compose.yml @@ -26,7 +26,8 @@ services: - ES_MONITORING_HOST=elasticsearch_monitoring - ES_MONITORING_PORT=9200 - ES_HOST_SSL=elasticsearchssl - - ES_KERBEROS_HOST=elasticsearch_kerberos.elastic + # See https://github.com/elastic/beats/issues/21838 + # - ES_KERBEROS_HOST=elasticsearch_kerberos.elastic - ES_PORT_SSL=9200 - ES_SUPERUSER_USER=admin - ES_SUPERUSER_PASS=changeme @@ -43,7 +44,8 @@ services: image: busybox depends_on: elasticsearch: { condition: service_healthy } - elasticsearch_kerberos.elastic: { condition: service_healthy } + # See https://github.com/elastic/beats/issues/21838 + # elasticsearch_kerberos.elastic: { condition: service_healthy } elasticsearch_monitoring: { condition: service_healthy } elasticsearchssl: { condition: service_healthy } logstash: { condition: service_healthy } @@ -128,34 +130,34 @@ services: environment: - ADVERTISED_HOST=kafka - elasticsearch_kerberos.elastic: - build: ${ES_BEATS}/testing/environments/docker/elasticsearch_kerberos - healthcheck: - test: bash -c "/healthcheck.sh" - retries: 1200 - interval: 5s - start_period: 60s - environment: - - "TERM=linux" - - "ELASTIC_PASSWORD=changeme" - - "ES_JAVA_OPTS=-Xms512m -Xmx512m -Djava.security.krb5.conf=/etc/krb5.conf" - - "network.host=" - - "transport.host=127.0.0.1" - - "http.host=0.0.0.0" - - "xpack.security.enabled=true" - - "indices.id_field_data.enabled=true" - - "xpack.license.self_generated.type=trial" - - "xpack.security.authc.realms.kerberos.ELASTIC.order=1" - - "xpack.security.authc.realms.kerberos.ELASTIC.keytab.path=/usr/share/elasticsearch/config/HTTP_elasticsearch_kerberos.elastic.keytab" - hostname: elasticsearch_kerberos.elastic - volumes: - # This is needed otherwise there won't be enough entropy to generate a new kerberos realm - - /dev/urandom:/dev/random - ports: - - 1088 - - 1749 - - 9200 - command: bash -c "/start.sh" + # elasticsearch_kerberos.elastic: + # build: ${ES_BEATS}/testing/environments/docker/elasticsearch_kerberos + # healthcheck: + # test: bash -c "/healthcheck.sh" + # retries: 1200 + # interval: 5s + # start_period: 60s + # environment: + # - "TERM=linux" + # - "ELASTIC_PASSWORD=changeme" + # - "ES_JAVA_OPTS=-Xms512m -Xmx512m -Djava.security.krb5.conf=/etc/krb5.conf" + # - "network.host=" + # - "transport.host=127.0.0.1" + # - "http.host=0.0.0.0" + # - "xpack.security.enabled=true" + # - "indices.id_field_data.enabled=true" + # - "xpack.license.self_generated.type=trial" + # - "xpack.security.authc.realms.kerberos.ELASTIC.order=1" + # - 
"xpack.security.authc.realms.kerberos.ELASTIC.keytab.path=/usr/share/elasticsearch/config/HTTP_elasticsearch_kerberos.elastic.keytab" + # hostname: elasticsearch_kerberos.elastic + # volumes: + # # This is needed otherwise there won't be enough entropy to generate a new kerberos realm + # - /dev/urandom:/dev/random + # ports: + # - 1088 + # - 1749 + # - 9200 + # command: bash -c "/start.sh" kibana: extends: From f2e161f6d2b38214966b42d5906592b6609e4d7e Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 15 Oct 2020 15:31:18 -0400 Subject: [PATCH 04/93] Fix syslog RFC 5424 parsing in CheckPoint module (#21854) Change the input type in the CheckPoint module to `udp` from `syslog` so the syslog parsing happens in the ingest node pipeline rather than in the Filebeat syslog input that only support RFC 3164. --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/modules/checkpoint.asciidoc | 18 ++++++++++-------- .../module/checkpoint/_meta/docs.asciidoc | 18 ++++++++++-------- .../checkpoint/firewall/config/firewall.yml | 5 ++--- 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 37a3366318f..9b5b614296e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -284,6 +284,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Provide backwards compatibility for the `append` processor when Elasticsearch is less than 7.10.0. {pull}21159[21159] - Fix checkpoint module when logs contain time field. {pull}20567[20567] - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] +- Fix syslog RFC 5424 parsing in the CheckPoint module. {pull}21854[21854] *Heartbeat* diff --git a/filebeat/docs/modules/checkpoint.asciidoc b/filebeat/docs/modules/checkpoint.asciidoc index de72aabb2b3..c4e453b452d 100644 --- a/filebeat/docs/modules/checkpoint.asciidoc +++ b/filebeat/docs/modules/checkpoint.asciidoc @@ -12,25 +12,27 @@ This file is generated! See scripts/docs_collector.py == Check Point module beta[] -This is a module for Check Point firewall logs. It supports logs from the Log Exporter in the Syslog format. +This is a module for Check Point firewall logs. It supports logs from the Log +Exporter in the Syslog RFC 5424 format. If you need to ingest Check Point logs +in CEF format then please use the <> (more +fields are provided in the syslog output). -To configure a Log Exporter, please refer to the documentation by https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk122323[Check Point]. +To configure a Log Exporter, please refer to the documentation by +https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk122323[Check +Point]. -Example below: +Example Log Exporter config: `cp_log_export add name testdestination target-server 192.168.1.1 target-port 9001 protocol udp format syslog` -The module that supports Check Point firewall logs sent in the CEF format requires the <> - -The Check Point and ECS fields that are the same between both modules will be mapped to the same names for compability between modules, though not all fields are included in CEF. Please reference the supported fields in the CEF documentation. - include::../include/gs-link.asciidoc[] [float] === Compatibility -This module has been tested against Check Point Log Exporter on R80.X but should also work with R77.30. 
+This module has been tested against Check Point Log Exporter on R80.X but should +also work with R77.30. include::../include/configuring-intro.asciidoc[] diff --git a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc index b09dcde2333..ecd8e0d3e81 100644 --- a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc @@ -7,25 +7,27 @@ == Check Point module beta[] -This is a module for Check Point firewall logs. It supports logs from the Log Exporter in the Syslog format. +This is a module for Check Point firewall logs. It supports logs from the Log +Exporter in the Syslog RFC 5424 format. If you need to ingest Check Point logs +in CEF format then please use the <> (more +fields are provided in the syslog output). -To configure a Log Exporter, please refer to the documentation by https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk122323[Check Point]. +To configure a Log Exporter, please refer to the documentation by +https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk122323[Check +Point]. -Example below: +Example Log Exporter config: `cp_log_export add name testdestination target-server 192.168.1.1 target-port 9001 protocol udp format syslog` -The module that supports Check Point firewall logs sent in the CEF format requires the <> - -The Check Point and ECS fields that are the same between both modules will be mapped to the same names for compability between modules, though not all fields are included in CEF. Please reference the supported fields in the CEF documentation. - include::../include/gs-link.asciidoc[] [float] === Compatibility -This module has been tested against Check Point Log Exporter on R80.X but should also work with R77.30. +This module has been tested against Check Point Log Exporter on R80.X but should +also work with R77.30. include::../include/configuring-intro.asciidoc[] diff --git a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml index f447d2aacdf..4892400a8b9 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml @@ -1,8 +1,7 @@ {{ if eq .input "syslog" }} -type: syslog -protocol.udp: - host: "{{.syslog_host}}:{{.syslog_port}}" +type: udp +host: "{{.syslog_host}}:{{.syslog_port}}" {{ else if eq .input "file" }} From 325ee323ce2a60e9b88d033aaf02aeab3fedc3f5 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 15 Oct 2020 14:45:46 -0600 Subject: [PATCH 05/93] Add cloud.account.id into add_cloud_metadata for gcp (#21776) --- CHANGELOG.next.asciidoc | 1 + libbeat/processors/add_cloud_metadata/provider_google_gce.go | 3 +++ .../processors/add_cloud_metadata/provider_google_gce_test.go | 3 +++ 3 files changed, 7 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9b5b614296e..e2b5844c192 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -465,6 +465,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add istiod metricset. {pull}21519[21519] - Release `add_cloudfoundry_metadata` as GA. {pull}21525[21525] - Add support for OpenStack SSL metadata APIs in `add_cloud_metadata`. {pull}21590[21590] +- Add cloud.account.id for GCP into add_cloud_metadata processor. 
{pull}21776[21776] - Add proxy metricset for istio module. {pull}21751[21751] *Auditbeat* diff --git a/libbeat/processors/add_cloud_metadata/provider_google_gce.go b/libbeat/processors/add_cloud_metadata/provider_google_gce.go index 0fe69e1998d..c17c1dfe2bd 100644 --- a/libbeat/processors/add_cloud_metadata/provider_google_gce.go +++ b/libbeat/processors/add_cloud_metadata/provider_google_gce.go @@ -69,6 +69,9 @@ var gceMetadataFetcher = provider{ "project": s.Object{ "id": c.Str("projectId"), }, + "account": s.Object{ + "id": c.Str("projectId"), + }, }.ApplyTo(out, project) } diff --git a/libbeat/processors/add_cloud_metadata/provider_google_gce_test.go b/libbeat/processors/add_cloud_metadata/provider_google_gce_test.go index eccc07d4b30..0c810fe7a29 100644 --- a/libbeat/processors/add_cloud_metadata/provider_google_gce_test.go +++ b/libbeat/processors/add_cloud_metadata/provider_google_gce_test.go @@ -152,6 +152,9 @@ func TestRetrieveGCEMetadata(t *testing.T) { expected := common.MapStr{ "cloud": common.MapStr{ + "account": common.MapStr{ + "id": "test-dev", + }, "provider": "gcp", "instance": common.MapStr{ "id": "3910564293633576924", From 80d4209bc90ddb6317f74266e3adfd4b662929ad Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Fri, 16 Oct 2020 09:01:34 +0200 Subject: [PATCH 06/93] [Filebeat][okta] Fix okta pagination (#21797) * Fix okta pagination * Use cursor storage --- x-pack/filebeat/input/httpjson/date_cursor.go | 4 +++- x-pack/filebeat/input/httpjson/pagination.go | 2 +- x-pack/filebeat/input/httpjson/pagination_test.go | 2 +- x-pack/filebeat/input/httpjson/requester.go | 7 ++++--- x-pack/filebeat/module/okta/system/config/input.yml | 4 ++++ x-pack/filebeat/module/okta/system/manifest.yml | 3 +++ 6 files changed, 16 insertions(+), 6 deletions(-) diff --git a/x-pack/filebeat/input/httpjson/date_cursor.go b/x-pack/filebeat/input/httpjson/date_cursor.go index 66ca659de78..eb20573eff2 100644 --- a/x-pack/filebeat/input/httpjson/date_cursor.go +++ b/x-pack/filebeat/input/httpjson/date_cursor.go @@ -41,7 +41,9 @@ func newDateCursorFromConfig(config config, log *logp.Logger) *dateCursor { c.urlField = config.DateCursor.URLField c.initialInterval = config.DateCursor.InitialInterval c.dateFormat = config.DateCursor.getDateFormat() - c.valueTpl = config.DateCursor.ValueTemplate.Template + if config.DateCursor.ValueTemplate != nil { + c.valueTpl = config.DateCursor.ValueTemplate.Template + } return c } diff --git a/x-pack/filebeat/input/httpjson/pagination.go b/x-pack/filebeat/input/httpjson/pagination.go index 020bc783055..02bdb4b13de 100644 --- a/x-pack/filebeat/input/httpjson/pagination.go +++ b/x-pack/filebeat/input/httpjson/pagination.go @@ -72,7 +72,7 @@ func (p *pagination) nextRequestInfo(ri *requestInfo, response response, lastObj // getNextLinkFromHeader retrieves the next URL for pagination from the HTTP Header of the response func getNextLinkFromHeader(header http.Header, fieldName string, re *regexp.Regexp) (string, error) { - links, ok := header[fieldName] + links, ok := header[http.CanonicalHeaderKey(fieldName)] if !ok { return "", fmt.Errorf("field %s does not exist in the HTTP Header", fieldName) } diff --git a/x-pack/filebeat/input/httpjson/pagination_test.go b/x-pack/filebeat/input/httpjson/pagination_test.go index 32e3261c1e6..17dcae4fc62 100644 --- a/x-pack/filebeat/input/httpjson/pagination_test.go +++ b/x-pack/filebeat/input/httpjson/pagination_test.go @@ -14,7 +14,7 @@ import ( func TestGetNextLinkFromHeader(t *testing.T) { header := make(http.Header) - 
header.Add("Link", "; rel=\"self\"") + header.Add("link", "; rel=\"self\"") header.Add("Link", "; rel=\"next\"") re, _ := regexp.Compile("<([^>]+)>; *rel=\"next\"(?:,|$)") url, err := getNextLinkFromHeader(header, "Link", re) diff --git a/x-pack/filebeat/input/httpjson/requester.go b/x-pack/filebeat/input/httpjson/requester.go index df0a1efb1eb..bf9abff19ee 100644 --- a/x-pack/filebeat/input/httpjson/requester.go +++ b/x-pack/filebeat/input/httpjson/requester.go @@ -113,6 +113,7 @@ func (r *requester) processHTTPRequest(ctx context.Context, publisher cursor.Pub return err } + response.header = resp.Header responseData, err := ioutil.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read http response: %w", err) @@ -165,10 +166,10 @@ func (r *requester) processHTTPRequest(ctx context.Context, publisher cursor.Pub if err != nil { return err } - } - if lastObj != nil && r.dateCursor.enabled { - r.updateCursorState(ri.url, r.dateCursor.getNextValue(common.MapStr(lastObj))) + if lastObj != nil && r.dateCursor.enabled { + r.updateCursorState(ri.url, r.dateCursor.getNextValue(common.MapStr(lastObj))) + } } return nil diff --git a/x-pack/filebeat/module/okta/system/config/input.yml b/x-pack/filebeat/module/okta/system/config/input.yml index 487dfdf165e..990d1a5c921 100644 --- a/x-pack/filebeat/module/okta/system/config/input.yml +++ b/x-pack/filebeat/module/okta/system/config/input.yml @@ -44,6 +44,10 @@ ssl: {{ .ssl | tojson }} url: {{ .url }} {{ end }} +date_cursor.field: published +date_cursor.url_field: since +date_cursor.initial_interval: {{ .initial_interval }} + {{ else if eq .input "file" }} type: log diff --git a/x-pack/filebeat/module/okta/system/manifest.yml b/x-pack/filebeat/module/okta/system/manifest.yml index 1f3722113b2..f8f83fd9aee 100644 --- a/x-pack/filebeat/module/okta/system/manifest.yml +++ b/x-pack/filebeat/module/okta/system/manifest.yml @@ -8,6 +8,7 @@ var: default: "SSWS" - name: http_client_timeout - name: http_method + default: GET - name: http_headers - name: http_request_body - name: interval @@ -31,6 +32,8 @@ var: - name: tags default: [forwarded] - name: url + - name: initial_interval + default: 24h input: config/input.yml ingest_pipeline: ingest/pipeline.yml From 2151d157819b40b7a70b6deeab5aa46af25b57b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Fri, 16 Oct 2020 11:18:44 +0200 Subject: [PATCH 07/93] Add tests for fileProspector in filestream input (#21712) ## What does this PR do? This PR adds tests to see how `fileProspector` handles Create, Write and Delete operations. In order to make the `Prospector` testable I changed `HarvesterGroup` an interface so it can be mocked During the testing an issue with path identifier showed up when a file was deleted. The identifier generated an incorrect value for `Name`. Now it is fixed. 
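For reviewers, the key enabler for these tests is the new `HarvesterGroup` interface (`Run(input.Context, Source) error`). A minimal sketch of the mock that drives the prospector in the tests, simplified from the `testHarvesterGroup` added in this patch (it only records which sources the prospector asked it to start):

```go
// testHarvesterGroup satisfies loginp.HarvesterGroup and records the names of
// the sources it was asked to run, so tests can assert on them afterwards.
type testHarvesterGroup struct {
	encounteredNames []string
}

func (t *testHarvesterGroup) Run(_ input.Context, s loginp.Source) error {
	t.encounteredNames = append(t.encounteredNames, s.Name())
	return nil
}
```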
--- filebeat/input/filestream/identifier.go | 6 +- .../internal/input-logfile/harvester.go | 10 +- .../internal/input-logfile/input.go | 2 +- .../internal/input-logfile/prospector.go | 2 +- filebeat/input/filestream/prospector.go | 15 +- filebeat/input/filestream/prospector_test.go | 197 ++++++++++++++++++ 6 files changed, 218 insertions(+), 14 deletions(-) create mode 100644 filebeat/input/filestream/prospector_test.go diff --git a/filebeat/input/filestream/identifier.go b/filebeat/input/filestream/identifier.go index 736c66da2f0..63883383a1c 100644 --- a/filebeat/input/filestream/identifier.go +++ b/filebeat/input/filestream/identifier.go @@ -116,11 +116,15 @@ func newPathIdentifier(_ *common.Config) (fileIdentifier, error) { } func (p *pathIdentifier) GetSource(e loginp.FSEvent) fileSource { + path := e.NewPath + if e.Op == loginp.OpDelete { + path = e.OldPath + } return fileSource{ info: e.Info, newPath: e.NewPath, oldPath: e.OldPath, - name: pluginName + identitySep + p.name + identitySep + e.NewPath, + name: pluginName + identitySep + p.name + identitySep + path, identifierGenerator: p.name, } } diff --git a/filebeat/input/filestream/internal/input-logfile/harvester.go b/filebeat/input/filestream/internal/input-logfile/harvester.go index d2f184cac7b..3c7573ad460 100644 --- a/filebeat/input/filestream/internal/input-logfile/harvester.go +++ b/filebeat/input/filestream/internal/input-logfile/harvester.go @@ -43,7 +43,11 @@ type Harvester interface { // HarvesterGroup is responsible for running the // Harvesters started by the Prospector. -type HarvesterGroup struct { +type HarvesterGroup interface { + Run(input.Context, Source) error +} + +type defaultHarvesterGroup struct { manager *InputManager readers map[string]context.CancelFunc pipeline beat.PipelineConnector @@ -54,7 +58,7 @@ type HarvesterGroup struct { } // Run starts the Harvester for a Source. -func (hg *HarvesterGroup) Run(ctx input.Context, s Source) error { +func (hg *defaultHarvesterGroup) Run(ctx input.Context, s Source) error { log := ctx.Logger.With("source", s.Name()) log.Debug("Starting harvester for file") @@ -111,7 +115,7 @@ func (hg *HarvesterGroup) Run(ctx input.Context, s Source) error { } // Cancel stops the running Harvester for a given Source. -func (hg *HarvesterGroup) Cancel(s Source) error { +func (hg *defaultHarvesterGroup) Cancel(s Source) error { if cancel, ok := hg.readers[s.Name()]; ok { cancel() return nil diff --git a/filebeat/input/filestream/internal/input-logfile/input.go b/filebeat/input/filestream/internal/input-logfile/input.go index 7084315b0c1..11092479cf3 100644 --- a/filebeat/input/filestream/internal/input-logfile/input.go +++ b/filebeat/input/filestream/internal/input-logfile/input.go @@ -59,7 +59,7 @@ func (inp *managedInput) Run( store.Retain() defer store.Release() - hg := &HarvesterGroup{ + hg := &defaultHarvesterGroup{ pipeline: pipeline, readers: make(map[string]context.CancelFunc), manager: inp.manager, diff --git a/filebeat/input/filestream/internal/input-logfile/prospector.go b/filebeat/input/filestream/internal/input-logfile/prospector.go index 9488596eb2c..185d6f9ec7e 100644 --- a/filebeat/input/filestream/internal/input-logfile/prospector.go +++ b/filebeat/input/filestream/internal/input-logfile/prospector.go @@ -28,7 +28,7 @@ import ( type Prospector interface { // Run starts the event loop and handles the incoming events // either by starting/stopping a harvester, or updating the statestore. 
- Run(input.Context, *statestore.Store, *HarvesterGroup) + Run(input.Context, *statestore.Store, HarvesterGroup) // Test checks if the Prospector is able to run the configuration // specified by the user. Test() error diff --git a/filebeat/input/filestream/prospector.go b/filebeat/input/filestream/prospector.go index 94670e18ce7..11f479ccef8 100644 --- a/filebeat/input/filestream/prospector.go +++ b/filebeat/input/filestream/prospector.go @@ -72,7 +72,7 @@ func newFileProspector( } // Run starts the fileProspector which accepts FS events from a file watcher. -func (p *fileProspector) Run(ctx input.Context, s *statestore.Store, hg *loginp.HarvesterGroup) { +func (p *fileProspector) Run(ctx input.Context, s *statestore.Store, hg loginp.HarvesterGroup) { log := ctx.Logger.With("prospector", prospectorDebugKey) log.Debug("Starting prospector") defer log.Debug("Prospector has stopped") @@ -100,8 +100,12 @@ func (p *fileProspector) Run(ctx input.Context, s *statestore.Store, hg *loginp. src := p.identifier.GetSource(fe) switch fe.Op { - case loginp.OpCreate: - log.Debugf("A new file %s has been found", fe.NewPath) + case loginp.OpCreate, loginp.OpWrite: + if fe.Op == loginp.OpCreate { + log.Debugf("A new file %s has been found", fe.NewPath) + } else if fe.Op == loginp.OpWrite { + log.Debugf("File %s has been updated", fe.NewPath) + } if p.ignoreOlder > 0 { now := time.Now() @@ -113,11 +117,6 @@ func (p *fileProspector) Run(ctx input.Context, s *statestore.Store, hg *loginp. hg.Run(ctx, src) - case loginp.OpWrite: - log.Debugf("File %s has been updated", fe.NewPath) - - hg.Run(ctx, src) - case loginp.OpDelete: log.Debugf("File %s has been removed", fe.OldPath) diff --git a/filebeat/input/filestream/prospector_test.go b/filebeat/input/filestream/prospector_test.go new file mode 100644 index 00000000000..1f75b12d2bd --- /dev/null +++ b/filebeat/input/filestream/prospector_test.go @@ -0,0 +1,197 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package filestream + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + loginp "github.com/elastic/beats/v7/filebeat/input/filestream/internal/input-logfile" + input "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/statestore" + "github.com/elastic/beats/v7/libbeat/statestore/storetest" + "github.com/elastic/go-concert/unison" +) + +func TestProspectorNewAndUpdatedFiles(t *testing.T) { + minuteAgo := time.Now().Add(-1 * time.Minute) + + testCases := map[string]struct { + events []loginp.FSEvent + ignoreOlder time.Duration + expectedSources []string + }{ + "two new files": { + events: []loginp.FSEvent{ + loginp.FSEvent{Op: loginp.OpCreate, NewPath: "/path/to/file"}, + loginp.FSEvent{Op: loginp.OpCreate, NewPath: "/path/to/other/file"}, + }, + expectedSources: []string{"filestream::path::/path/to/file", "filestream::path::/path/to/other/file"}, + }, + "one updated file": { + events: []loginp.FSEvent{ + loginp.FSEvent{Op: loginp.OpWrite, NewPath: "/path/to/file"}, + }, + expectedSources: []string{"filestream::path::/path/to/file"}, + }, + "old files with ignore older configured": { + events: []loginp.FSEvent{ + loginp.FSEvent{ + Op: loginp.OpCreate, + NewPath: "/path/to/file", + Info: testFileInfo{"/path/to/file", 5, minuteAgo}, + }, + loginp.FSEvent{ + Op: loginp.OpWrite, + NewPath: "/path/to/other/file", + Info: testFileInfo{"/path/to/other/file", 5, minuteAgo}, + }, + }, + ignoreOlder: 10 * time.Second, + expectedSources: []string{}, + }, + "newer files with ignore older": { + events: []loginp.FSEvent{ + loginp.FSEvent{ + Op: loginp.OpCreate, + NewPath: "/path/to/file", + Info: testFileInfo{"/path/to/file", 5, minuteAgo}, + }, + loginp.FSEvent{ + Op: loginp.OpWrite, + NewPath: "/path/to/other/file", + Info: testFileInfo{"/path/to/other/file", 5, minuteAgo}, + }, + }, + ignoreOlder: 5 * time.Minute, + expectedSources: []string{"filestream::path::/path/to/file", "filestream::path::/path/to/other/file"}, + }, + } + + for name, test := range testCases { + test := test + + t.Run(name, func(t *testing.T) { + p := fileProspector{ + filewatcher: &mockFileWatcher{events: test.events}, + identifier: mustPathIdentifier(), + ignoreOlder: test.ignoreOlder, + } + ctx := input.Context{Logger: logp.L(), Cancelation: context.Background()} + hg := getTestHarvesterGroup() + + p.Run(ctx, testStateStore(), hg) + + assert.ElementsMatch(t, hg.encounteredNames, test.expectedSources) + }) + } +} + +func TestProspectorDeletedFile(t *testing.T) { + testCases := map[string]struct { + events []loginp.FSEvent + cleanRemoved bool + }{ + "one deleted file without clean removed": { + events: []loginp.FSEvent{ + loginp.FSEvent{Op: loginp.OpDelete, OldPath: "/path/to/file"}, + }, + cleanRemoved: false, + }, + "one deleted file with clean removed": { + events: []loginp.FSEvent{ + loginp.FSEvent{Op: loginp.OpDelete, OldPath: "/path/to/file"}, + }, + cleanRemoved: true, + }, + } + + for name, test := range testCases { + test := test + + t.Run(name, func(t *testing.T) { + p := fileProspector{ + filewatcher: &mockFileWatcher{events: test.events}, + identifier: mustPathIdentifier(), + cleanRemoved: test.cleanRemoved, + } + ctx := input.Context{Logger: logp.L(), Cancelation: context.Background()} + + testStore := testStateStore() + testStore.Set("filestream::path::/path/to/file", nil) + + p.Run(ctx, testStore, getTestHarvesterGroup()) + + has, err := testStore.Has("filestream::path::/path/to/file") + if err != 
nil { + t.Fatal(err) + } + + if test.cleanRemoved { + assert.False(t, has) + } else { + assert.True(t, has) + + } + }) + } +} + +type testHarvesterGroup struct { + encounteredNames []string +} + +func getTestHarvesterGroup() *testHarvesterGroup { return &testHarvesterGroup{make([]string, 0)} } + +func (t *testHarvesterGroup) Run(_ input.Context, s loginp.Source) error { + t.encounteredNames = append(t.encounteredNames, s.Name()) + return nil +} + +type mockFileWatcher struct { + nextIdx int + events []loginp.FSEvent +} + +func (m *mockFileWatcher) Event() loginp.FSEvent { + if len(m.events) == m.nextIdx { + return loginp.FSEvent{} + } + evt := m.events[m.nextIdx] + m.nextIdx++ + return evt +} +func (m *mockFileWatcher) Run(_ unison.Canceler) { return } + +func testStateStore() *statestore.Store { + s, _ := statestore.NewRegistry(storetest.NewMemoryStoreBackend()).Get(pluginName) + return s +} + +func mustPathIdentifier() fileIdentifier { + pathIdentifier, err := newPathIdentifier(nil) + if err != nil { + panic(err) + } + return pathIdentifier + +} From 80b8f536c9ac4a5d482c4f34d6d453b38810981c Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Fri, 16 Oct 2020 11:57:54 +0200 Subject: [PATCH 08/93] Fix panic on add_docker_metadata close (#21882) If the processor was not properly initialized, for example because it couldn't access the docker socket, then the watcher will be nil. Avoid trying to stop the watcher in that case. --- .../processors/add_docker_metadata/add_docker_metadata.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/libbeat/processors/add_docker_metadata/add_docker_metadata.go index beaca3bb46b..402a809b3ab 100644 --- a/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -213,7 +213,10 @@ func (d *addDockerMetadata) Close() error { if d.cgroups != nil { d.cgroups.StopJanitor() } - d.watcher.Stop() + // Watcher can be nil if processor failed on creation + if d.watcher != nil { + d.watcher.Stop() + } err := processors.Close(d.sourceProcessor) if err != nil { return errors.Wrap(err, "closing source processor of add_docker_metadata") From 62e7250efd5e01c0a55c98cee7dc24a5c903ec31 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 16 Oct 2020 11:42:17 +0100 Subject: [PATCH 09/93] [CI] kind setup fails sometimes (#21857) --- Jenkinsfile | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index f3618d6615f..6eef1b2d0a8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -167,8 +167,8 @@ def cloud(Map args = [:]) { def k8sTest(Map args = [:]) { def versions = args.versions - node(args.label) { - versions.each{ v -> + versions.each{ v -> + node(args.label) { stage("${args.context} ${v}"){ withEnv(["K8S_VERSION=${v}", "KIND_VERSION=v0.7.0", "KUBECONFIG=${env.WORKSPACE}/kubecfg"]){ withGithubNotify(context: "${args.context} ${v}") { @@ -176,7 +176,19 @@ def k8sTest(Map args = [:]) { retryWithSleep(retries: 2, seconds: 5, backoff: true){ sh(label: "Install kind", script: ".ci/scripts/install-kind.sh") } retryWithSleep(retries: 2, seconds: 5, backoff: true){ sh(label: "Install kubectl", script: ".ci/scripts/install-kubectl.sh") } try { - sh(label: "Setup kind", script: ".ci/scripts/kind-setup.sh") + // Add some environmental resilience when setup does not work the very first time. 
+ def i = 0 + retryWithSleep(retries: 3, seconds: 5, backoff: true){ + try { + sh(label: "Setup kind", script: ".ci/scripts/kind-setup.sh") + } catch(err) { + i++ + sh(label: 'Delete cluster', script: 'kind delete cluster') + if (i > 2) { + error("Setup kind failed with error '${err.toString()}'") + } + } + } sh(label: "Integration tests", script: "MODULE=kubernetes make -C metricbeat integration-tests") sh(label: "Deploy to kubernetes",script: "make -C deploy/kubernetes test") } finally { From 47862a19b970bc9e4c34752802f6d516763032cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 16 Oct 2020 12:43:02 +0200 Subject: [PATCH 10/93] chore: delegate variant pushes to the right method (#21861) * fix: delegate pushes to variants * chore: group conditions for x-pack * chore: simplify with endsWith Co-authored-by: Victor Martinez Co-authored-by: Victor Martinez --- .ci/packaging.groovy | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index f8a64f525c6..4145ee6bdd1 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -173,30 +173,20 @@ pipeline { def pushCIDockerImages(){ catchError(buildResult: 'UNSTABLE', message: 'Unable to push Docker images', stageResult: 'FAILURE') { - if ("${env.BEATS_FOLDER}" == "auditbeat"){ - tagAndPush('auditbeat-oss') - } else if ("${env.BEATS_FOLDER}" == "filebeat") { - tagAndPush('filebeat-oss') - } else if ("${env.BEATS_FOLDER}" == "heartbeat"){ - tagAndPush('heartbeat-oss') + if (env?.BEATS_FOLDER?.endsWith('auditbeat')) { + tagAndPush('auditbeat') + } else if (env?.BEATS_FOLDER?.endsWith('filebeat')) { + tagAndPush('filebeat') + } else if (env?.BEATS_FOLDER?.endsWith('heartbeat')) { + tagAndPush('heartbeat') } else if ("${env.BEATS_FOLDER}" == "journalbeat"){ tagAndPush('journalbeat') - tagAndPush('journalbeat-oss') - } else if ("${env.BEATS_FOLDER}" == "metricbeat"){ - tagAndPush('metricbeat-oss') + } else if (env?.BEATS_FOLDER?.endsWith('metricbeat')) { + tagAndPush('metricbeat') } else if ("${env.BEATS_FOLDER}" == "packetbeat"){ tagAndPush('packetbeat') - tagAndPush('packetbeat-oss') - } else if ("${env.BEATS_FOLDER}" == "x-pack/auditbeat"){ - tagAndPush('auditbeat') } else if ("${env.BEATS_FOLDER}" == "x-pack/elastic-agent") { tagAndPush('elastic-agent') - } else if ("${env.BEATS_FOLDER}" == "x-pack/filebeat"){ - tagAndPush('filebeat') - } else if ("${env.BEATS_FOLDER}" == "x-pack/heartbeat"){ - tagAndPush('heartbeat') - } else if ("${env.BEATS_FOLDER}" == "x-pack/metricbeat"){ - tagAndPush('metricbeat') } } } From bb79569dcf4723acd06eeba00d0baf8974961d8f Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Fri, 16 Oct 2020 15:54:00 +0200 Subject: [PATCH 11/93] [Ingest Manager] Use local temp instead of system one (#21883) [Ingest Manager] Use local temp instead of system one (#21883) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/paths/paths.go | 16 ++++++++++++++++ .../artifact/install/atomic/atomic_installer.go | 4 +++- .../install/atomic/atomic_installer_test.go | 6 ++++-- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index deae2522773..d01c8a1c7bf 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -16,6 +16,7 @@ - Include inputs in action store actions {pull}21298[21298] - Fix issue where inputs without processors defined 
would panic {pull}21628[21628] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] +- Use local temp instead of system one {pull}21883[21883] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/paths.go b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go index b646f3796ba..fca3dbd8828 100644 --- a/x-pack/elastic-agent/pkg/agent/application/paths/paths.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/paths.go @@ -10,14 +10,20 @@ import ( "os" "path/filepath" "strings" + "sync" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) +const ( + tempSubdir = "tmp" +) + var ( topPath string configPath string logsPath string + tmpCreator sync.Once ) func init() { @@ -37,6 +43,16 @@ func Top() string { return topPath } +// TempDir returns agent temp dir located within data dir. +func TempDir() string { + tmpDir := filepath.Join(Data(), tempSubdir) + tmpCreator.Do(func() { + // create tempdir as it probably don't exists + os.MkdirAll(tmpDir, 0750) + }) + return tmpDir +} + // Home returns a directory where binary lives func Home() string { return versionedHome(topPath) diff --git a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go index 5e26436bfc4..3dc0dbe232a 100644 --- a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go +++ b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer.go @@ -9,6 +9,8 @@ import ( "io/ioutil" "os" "path/filepath" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" ) type embeddedInstaller interface { @@ -31,7 +33,7 @@ func NewInstaller(i embeddedInstaller) (*Installer, error) { // Install performs installation of program in a specific version. func (i *Installer) Install(ctx context.Context, programName, version, installDir string) error { // tar installer uses Dir of installDir to determine location of unpack - tempDir, err := ioutil.TempDir(os.TempDir(), "elastic-agent-install") + tempDir, err := ioutil.TempDir(paths.TempDir(), "elastic-agent-install") if err != nil { return err } diff --git a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer_test.go b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer_test.go index d6266659b7d..a0bfa213ca7 100644 --- a/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer_test.go +++ b/x-pack/elastic-agent/pkg/artifact/install/atomic/atomic_installer_test.go @@ -14,6 +14,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" ) func TestOKInstall(t *testing.T) { @@ -25,7 +27,7 @@ func TestOKInstall(t *testing.T) { assert.NoError(t, err) ctx := context.Background() - installDir := filepath.Join(os.TempDir(), "install_dir") + installDir := filepath.Join(paths.TempDir(), "install_dir") wg.Add(1) go func() { @@ -59,7 +61,7 @@ func TestContextCancelledInstall(t *testing.T) { assert.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) - installDir := filepath.Join(os.TempDir(), "install_dir") + installDir := filepath.Join(paths.TempDir(), "install_dir") wg.Add(1) go func() { From 1f08e354d6b847da920fc96e113b54df26c47ffd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Fri, 16 Oct 2020 16:00:49 +0200 Subject: [PATCH 12/93] Add tests of reader of filestream input (#21814) ## What does this PR do? 
This PR adds tests for `logFile` in the `filestream` input. This element of the architecture is responsible for reading directly from the disk and closing the reader if the state or the position meets the configured criteria. Conditions tested in the PR: - file is removed - file is renamed - file is truncated - file is inactive for a time - file reader reaches EOF - timeout of the file reader is reached --- filebeat/input/filestream/filestream.go | 10 +- filebeat/input/filestream/filestream_test.go | 136 ++++++++++++++++++ .../filestream/filestream_test_non_windows.go | 104 ++++++++++++++ 3 files changed, 243 insertions(+), 7 deletions(-) create mode 100644 filebeat/input/filestream/filestream_test.go create mode 100644 filebeat/input/filestream/filestream_test_non_windows.go diff --git a/filebeat/input/filestream/filestream.go b/filebeat/input/filestream/filestream.go index 4d42bbf6242..1a559c67e06 100644 --- a/filebeat/input/filestream/filestream.go +++ b/filebeat/input/filestream/filestream.go @@ -138,20 +138,16 @@ func (f *logFile) Read(buf []byte) (int, error) { } func (f *logFile) startFileMonitoringIfNeeded() { - if f.closeInactive == 0 && f.closeAfterInterval == 0 { - return - } - - if f.closeInactive > 0 { + if f.closeInactive > 0 || f.closeRemoved || f.closeRenamed { f.tg.Go(func(ctx unison.Canceler) error { - f.closeIfTimeout(ctx) + f.periodicStateCheck(ctx) return nil }) } if f.closeAfterInterval > 0 { f.tg.Go(func(ctx unison.Canceler) error { - f.periodicStateCheck(ctx) + f.closeIfTimeout(ctx) return nil }) } diff --git a/filebeat/input/filestream/filestream_test.go b/filebeat/input/filestream/filestream_test.go new file mode 100644 index 00000000000..329fa0ad55f --- /dev/null +++ b/filebeat/input/filestream/filestream_test.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package filestream + +import ( + "context" + "io" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +func TestLogFileTimedClosing(t *testing.T) { + testCases := map[string]struct { + inactive time.Duration + closeEOF bool + afterInterval time.Duration + expectedErr error + }{ + "read from file and close inactive": { + inactive: 2 * time.Second, + expectedErr: ErrClosed, + }, + "read from file and close after interval": { + afterInterval: 3 * time.Second, + expectedErr: ErrClosed, + }, + "read from file and close on EOF": { + closeEOF: true, + expectedErr: io.EOF, + }, + } + + for name, test := range testCases { + test := test + + f := createTestLogFile() + defer f.Close() + defer os.Remove(f.Name()) + + t.Run(name, func(t *testing.T) { + reader, err := newFileReader( + logp.L(), + context.TODO(), + f, + readerConfig{}, + closerConfig{ + OnStateChange: stateChangeCloserConfig{ + CheckInterval: 1 * time.Second, + Inactive: test.inactive, + }, + Reader: readerCloserConfig{ + OnEOF: test.closeEOF, + AfterInterval: test.afterInterval, + }, + }, + ) + if err != nil { + t.Fatalf("error while creating logReader: %+v", err) + } + + err = readUntilError(reader) + + assert.Equal(t, test.expectedErr, err) + }) + } +} + +func TestLogFileTruncated(t *testing.T) { + f := createTestLogFile() + defer f.Close() + defer os.Remove(f.Name()) + + reader, err := newFileReader(logp.L(), context.TODO(), f, readerConfig{}, closerConfig{}) + if err != nil { + t.Fatalf("error while creating logReader: %+v", err) + } + + buf := make([]byte, 1024) + _, err = reader.Read(buf) + assert.Nil(t, err) + + err = f.Truncate(0) + if err != nil { + t.Fatalf("error while truncating file: %+v", err) + } + + err = readUntilError(reader) + + assert.Equal(t, ErrFileTruncate, err) +} + +func createTestLogFile() *os.File { + f, err := ioutil.TempFile("", "filestream_reader_test") + if err != nil { + panic(err) + } + content := []byte("first log line\nanother interesting line\na third log message\n") + if _, err := f.Write(content); err != nil { + panic(err) + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + panic(err) + } + return f +} + +func readUntilError(reader *logFile) error { + buf := make([]byte, 1024) + _, err := reader.Read(buf) + for err == nil { + buf := make([]byte, 1024) + _, err = reader.Read(buf) + } + return err +} diff --git a/filebeat/input/filestream/filestream_test_non_windows.go b/filebeat/input/filestream/filestream_test_non_windows.go new file mode 100644 index 00000000000..9c2b33ed3de --- /dev/null +++ b/filebeat/input/filestream/filestream_test_non_windows.go @@ -0,0 +1,104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build !windows + +package filestream + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/logp" +) + +// these tests are separated as one cannot delete/rename files +// while another process is working with it on Windows +func TestLogFileRenamed(t *testing.T) { + f := createTestLogFile() + defer f.Close() + + renamedFile := f.Name() + ".renamed" + + reader, err := newFileReader( + logp.L(), + context.TODO(), + f, + readerConfig{}, + closerConfig{ + OnStateChange: stateChangeCloserConfig{ + CheckInterval: 1 * time.Second, + Renamed: true, + }, + }, + ) + if err != nil { + t.Fatalf("error while creating logReader: %+v", err) + } + + buf := make([]byte, 1024) + _, err = reader.Read(buf) + assert.Nil(t, err) + + err = os.Rename(f.Name(), renamedFile) + if err != nil { + t.Fatalf("error while renaming file: %+v", err) + } + + err = readUntilError(reader) + os.Remove(renamedFile) + + assert.Equal(t, ErrClosed, err) +} + +func TestLogFileRemoved(t *testing.T) { + f := createTestLogFile() + defer f.Close() + + reader, err := newFileReader( + logp.L(), + context.TODO(), + f, + readerConfig{}, + closerConfig{ + OnStateChange: stateChangeCloserConfig{ + CheckInterval: 1 * time.Second, + Removed: true, + }, + }, + ) + if err != nil { + t.Fatalf("error while creating logReader: %+v", err) + } + + buf := make([]byte, 1024) + _, err = reader.Read(buf) + assert.Nil(t, err) + + err = os.Remove(f.Name()) + if err != nil { + t.Fatalf("error while remove file: %+v", err) + } + + err = readUntilError(reader) + + assert.Equal(t, ErrClosed, err) +} From 9333376466d33542354479862ecfebce2723d177 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 16 Oct 2020 16:06:09 +0100 Subject: [PATCH 13/93] [CI] lint stage doesn't produce test reports (#21888) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6eef1b2d0a8..70cefee034b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -74,7 +74,7 @@ pipeline { } steps { withGithubNotify(context: 'Lint') { - withBeatsEnv(archive: true, id: 'lint') { + withBeatsEnv(archive: false, id: 'lint') { dumpVariables() cmd(label: 'make check', script: 'make check') } From 73dbb23daa3b0d8ca0f3bb7d8f8e0e89baa2e32c Mon Sep 17 00:00:00 2001 From: Toby McLaughlin Date: Sat, 17 Oct 2020 01:45:18 +1030 Subject: [PATCH 14/93] [docs] Remove extra word in autodiscover docs (#21871) --- libbeat/docs/shared-autodiscover.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc index c7993c29bef..df0ea4d2e02 100644 --- a/libbeat/docs/shared-autodiscover.asciidoc +++ b/libbeat/docs/shared-autodiscover.asciidoc @@ -24,7 +24,7 @@ start/stop events. This ensures you don't need to worry about state, but only de The Docker autodiscover provider watches for Docker containers to start and stop. -These are the available fields during within config templating. The `docker.*` fields will be available on each emitted event. +These are the fields available within config templating. The `docker.*` fields will be available on each emitted event. event: * host @@ -130,7 +130,7 @@ endif::[] The Kubernetes autodiscover provider watches for Kubernetes nodes, pods, services to start, update, and stop. -These are the available fields during within config templating. The `kubernetes.*` fields will be available on each emitted event. 
+These are the fields available within config templating. The `kubernetes.*` fields will be available on each emitted event. [float] ====== Generic fields: From f936a45b3863bff20d26d9bfbd410779ffe5dc65 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 16 Oct 2020 16:34:59 +0100 Subject: [PATCH 15/93] [CI] Add stage name in the step (#21887) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 70cefee034b..52c579ab7f5 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -220,7 +220,7 @@ def target(Map args = [:]) { // make commands use -C while mage commands require the dir(folder) // let's support this scenario with the location variable. dir(isMage ? directory : '') { - cmd(label: "${command}", script: "${command}") + cmd(label: "${args.id?.trim() ? args.id : env.STAGE_NAME} - ${command}", script: "${command}") } } } From 4427fa59213839f075858266d94ca4f495e9d514 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Fri, 16 Oct 2020 17:54:27 +0200 Subject: [PATCH 16/93] Refactor docker watcher to fix flaky test and other small issues (#21851) Refactor docker watcher to fix some small issues and improve testability: * Actually release resources of previous connections when reconnecting. * Watcher uses a clock that can be mocked in tests for time-sensitive functionality. * Use nanoseconds-precision from events timestamps, this is important to avoid duplicated events on reconnection. * Fix logger initialization (it was being initialized as docker.docker). * Refactor test helpers to have more control on test watcher when needed. * Some other code refactors. --- CHANGELOG.next.asciidoc | 1 + libbeat/common/docker/watcher.go | 282 ++++++++++++++------------ libbeat/common/docker/watcher_test.go | 139 ++++++++----- 3 files changed, 242 insertions(+), 180 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e2b5844c192..51255305f42 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -186,6 +186,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix `libbeat.output.write.bytes` and `libbeat.output.read.bytes` metrics of the Elasticsearch output. {issue}20752[20752] {pull}21197[21197] - The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21259[21258] - Orderly close processors when processing pipelines are not needed anymore to release their resources. {pull}16349[16349] +- Fix memory leak and events duplication in docker autodiscover and add_docker_metadata. 
{pull}21851[21851] *Auditbeat* diff --git a/libbeat/common/docker/watcher.go b/libbeat/common/docker/watcher.go index 2421c232eee..4145423209a 100644 --- a/libbeat/common/docker/watcher.go +++ b/libbeat/common/docker/watcher.go @@ -20,7 +20,8 @@ package docker import ( - "fmt" + "context" + "io" "net/http" "sync" "time" @@ -29,7 +30,6 @@ import ( "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/go-connections/tlsconfig" - "golang.org/x/net/context" "github.com/elastic/beats/v7/libbeat/common/bus" "github.com/elastic/beats/v7/libbeat/logp" @@ -39,7 +39,6 @@ import ( const ( shortIDLen = 12 dockerRequestTimeout = 10 * time.Second - dockerWatchRequestTimeout = 60 * time.Minute dockerEventsWatchPityTimerInterval = 10 * time.Second dockerEventsWatchPityTimerTimeout = 10 * time.Minute ) @@ -74,20 +73,30 @@ type TLSConfig struct { type watcher struct { sync.RWMutex - log *logp.Logger - client Client - ctx context.Context - stop context.CancelFunc - containers map[string]*Container - deleted map[string]time.Time // deleted annotations key -> last access time - cleanupTimeout time.Duration - lastValidTimestamp int64 - lastWatchReceivedEventTime time.Time - stopped sync.WaitGroup - bus bus.Bus - shortID bool // whether to store short ID in "containers" too + log *logp.Logger + client Client + ctx context.Context + stop context.CancelFunc + containers map[string]*Container + deleted map[string]time.Time // deleted annotations key -> last access time + cleanupTimeout time.Duration + clock clock + stopped sync.WaitGroup + bus bus.Bus + shortID bool // whether to store short ID in "containers" too } +// clock is an interface used to provide mocked time on testing +type clock interface { + Now() time.Time +} + +// systemClock implements the clock interface using the system clock via the time package +type systemClock struct{} + +// Now returns the current time +func (*systemClock) Now() time.Time { return time.Now() } + // Container info retrieved by the watcher type Container struct { ID string @@ -147,8 +156,6 @@ func NewWatcher(log *logp.Logger, host string, tls *TLSConfig, storeShortID bool // NewWatcherWithClient creates a new Watcher from a given Docker client func NewWatcherWithClient(log *logp.Logger, client Client, cleanupTimeout time.Duration, storeShortID bool) (Watcher, error) { - log = log.Named("docker") - ctx, cancel := context.WithCancel(context.Background()) return &watcher{ log: log, @@ -160,6 +167,7 @@ func NewWatcherWithClient(log *logp.Logger, client Client, cleanupTimeout time.D cleanupTimeout: cleanupTimeout, bus: bus.New(log, "docker"), shortID: storeShortID, + clock: &systemClock{}, }, nil } @@ -177,7 +185,7 @@ func (w *watcher) Container(ID string) *Container { // Update last access time if it's deleted if ok { w.Lock() - w.deleted[container.ID] = time.Now() + w.deleted[container.ID] = w.clock.Now() w.Unlock() } @@ -201,7 +209,6 @@ func (w *watcher) Containers() map[string]*Container { func (w *watcher) Start() error { // Do initial scan of existing containers w.log.Debug("Start docker containers scanner") - w.lastValidTimestamp = time.Now().Unix() w.Lock() defer w.Unlock() @@ -236,108 +243,124 @@ func (w *watcher) Start() error { func (w *watcher) Stop() { w.stop() + w.stopped.Wait() } func (w *watcher) watch() { - log := w.log + defer w.stopped.Done() filter := filters.NewArgs() filter.Add("type", "container") - for { + // Ticker to restart the watcher when no events are received after some time. 
+ tickChan := time.NewTicker(dockerEventsWatchPityTimerInterval) + defer tickChan.Stop() + + lastValidTimestamp := w.clock.Now() + + watch := func() bool { + lastReceivedEventTime := w.clock.Now() + + w.log.Debugf("Fetching events since %s", lastValidTimestamp) + options := types.EventsOptions{ - Since: fmt.Sprintf("%d", w.lastValidTimestamp), + Since: lastValidTimestamp.Format(time.RFC3339Nano), Filters: filter, } - log.Debugf("Fetching events since %s", options.Since) - ctx, cancel := context.WithTimeout(w.ctx, dockerWatchRequestTimeout) + ctx, cancel := context.WithCancel(w.ctx) defer cancel() events, errors := w.client.Events(ctx, options) - - //ticker for timeout to restart watcher when no events are received - w.lastWatchReceivedEventTime = time.Now() - tickChan := time.NewTicker(dockerEventsWatchPityTimerInterval) - defer tickChan.Stop() - - WATCH: for { select { case event := <-events: - log.Debugf("Got a new docker event: %v", event) - w.lastValidTimestamp = event.Time - w.lastWatchReceivedEventTime = time.Now() - - // Add / update - if event.Action == "start" || event.Action == "update" { - filter := filters.NewArgs() - filter.Add("id", event.Actor.ID) - - containers, err := w.listContainers(types.ContainerListOptions{ - Filters: filter, - }) - if err != nil || len(containers) != 1 { - log.Errorf("Error getting container info: %v", err) - continue - } - container := containers[0] - - w.Lock() - w.containers[event.Actor.ID] = container - if w.shortID { - w.containers[event.Actor.ID[:shortIDLen]] = container - } - // un-delete if it's flagged (in case of update or recreation) - delete(w.deleted, event.Actor.ID) - w.Unlock() - - w.bus.Publish(bus.Event{ - "start": true, - "container": container, - }) - } - - // Delete - if event.Action == "die" { - container := w.Container(event.Actor.ID) - if container != nil { - w.bus.Publish(bus.Event{ - "stop": true, - "container": container, - }) - } - - w.Lock() - w.deleted[event.Actor.ID] = time.Now() - w.Unlock() + w.log.Debugf("Got a new docker event: %v", event) + lastValidTimestamp = time.Unix(event.Time, event.TimeNano) + lastReceivedEventTime = w.clock.Now() + + switch event.Action { + case "start", "update": + w.containerUpdate(event) + case "die": + w.containerDelete(event) } - case err := <-errors: - // Restart watch call - if err == context.DeadlineExceeded { - log.Info("Context deadline exceeded for docker request, restarting watch call") - } else { - log.Errorf("Error watching for docker events: %+v", err) + switch err { + case io.EOF: + // Client disconnected, watch is not done, reconnect + w.log.Debug("EOF received in events stream, restarting watch call") + case context.DeadlineExceeded: + w.log.Debug("Context deadline exceeded for docker request, restarting watch call") + case context.Canceled: + // Parent context has been canceled, watch is done. 
+ return true + default: + w.log.Errorf("Error watching for docker events: %+v", err) } - - time.Sleep(1 * time.Second) - break WATCH - + return false case <-tickChan.C: - if time.Since(w.lastWatchReceivedEventTime) > dockerEventsWatchPityTimerTimeout { - log.Infof("No events received within %s, restarting watch call", dockerEventsWatchPityTimerTimeout) - time.Sleep(1 * time.Second) - break WATCH + if time.Since(lastReceivedEventTime) > dockerEventsWatchPityTimerTimeout { + w.log.Infof("No events received within %s, restarting watch call", dockerEventsWatchPityTimerTimeout) + return false } - case <-w.ctx.Done(): - log.Debug("Watcher stopped") - w.stopped.Done() - return + w.log.Debug("Watcher stopped") + return true } } + } + for { + done := watch() + if done { + return + } + // Wait before trying to reconnect + time.Sleep(1 * time.Second) + } +} + +func (w *watcher) containerUpdate(event events.Message) { + filter := filters.NewArgs() + filter.Add("id", event.Actor.ID) + + containers, err := w.listContainers(types.ContainerListOptions{ + Filters: filter, + }) + if err != nil || len(containers) != 1 { + w.log.Errorf("Error getting container info: %v", err) + return + } + container := containers[0] + + w.Lock() + w.containers[event.Actor.ID] = container + if w.shortID { + w.containers[event.Actor.ID[:shortIDLen]] = container + } + // un-delete if it's flagged (in case of update or recreation) + delete(w.deleted, event.Actor.ID) + w.Unlock() + + w.bus.Publish(bus.Event{ + "start": true, + "container": container, + }) +} + +func (w *watcher) containerDelete(event events.Message) { + container := w.Container(event.Actor.ID) + + w.Lock() + w.deleted[event.Actor.ID] = w.clock.Now() + w.Unlock() + + if container != nil { + w.bus.Publish(bus.Event{ + "stop": true, + "container": container, + }) } } @@ -393,49 +416,52 @@ func (w *watcher) listContainers(options types.ContainerListOptions) ([]*Contain // Clean up deleted containers after they are not used anymore func (w *watcher) cleanupWorker() { - log := w.log + defer w.stopped.Done() for { select { case <-w.ctx.Done(): - w.stopped.Done() return // Wait a full period case <-time.After(w.cleanupTimeout): - // Check entries for timeout - var toDelete []string - timeout := time.Now().Add(-w.cleanupTimeout) - w.RLock() - for key, lastSeen := range w.deleted { - if lastSeen.Before(timeout) { - log.Debugf("Removing container %s after cool down timeout", key) - toDelete = append(toDelete, key) - } - } - w.RUnlock() - - // Delete timed out entries: - for _, key := range toDelete { - container := w.Container(key) - if container != nil { - w.bus.Publish(bus.Event{ - "delete": true, - "container": container, - }) - } - } + w.runCleanup() + } + } +} - w.Lock() - for _, key := range toDelete { - delete(w.deleted, key) - delete(w.containers, key) - if w.shortID { - delete(w.containers, key[:shortIDLen]) - } - } - w.Unlock() +func (w *watcher) runCleanup() { + // Check entries for timeout + var toDelete []string + timeout := w.clock.Now().Add(-w.cleanupTimeout) + w.RLock() + for key, lastSeen := range w.deleted { + if lastSeen.Before(timeout) { + w.log.Debugf("Removing container %s after cool down timeout", key) + toDelete = append(toDelete, key) + } + } + w.RUnlock() + + // Delete timed out entries: + for _, key := range toDelete { + container := w.Container(key) + if container != nil { + w.bus.Publish(bus.Event{ + "delete": true, + "container": container, + }) + } + } + + w.Lock() + for _, key := range toDelete { + delete(w.deleted, key) + 
delete(w.containers, key) + if w.shortID { + delete(w.containers, key[:shortIDLen]) } } + w.Unlock() } // ListenStart returns a bus listener to receive container started events, with a `container` key holding it diff --git a/libbeat/common/docker/watcher_test.go b/libbeat/common/docker/watcher_test.go index ec53fbdeb73..a0de0567af4 100644 --- a/libbeat/common/docker/watcher_test.go +++ b/libbeat/common/docker/watcher_test.go @@ -21,6 +21,7 @@ package docker import ( "errors" + "sync" "testing" "time" @@ -37,7 +38,7 @@ type MockClient struct { containers [][]types.Container // event list to send on Events call events []interface{} - + // done channel is closed when the client has sent all events done chan interface{} } @@ -71,7 +72,7 @@ func (m *MockClient) ContainerInspect(ctx context.Context, container string) (ty } func TestWatcherInitialization(t *testing.T) { - watcher := runWatcher(t, true, + watcher := runAndWait(testWatcher(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -90,7 +91,8 @@ func TestWatcherInitialization(t *testing.T) { }, }, }, - nil) + nil, + )) assert.Equal(t, map[string]*Container{ "0332dbd79e20": &Container{ @@ -109,7 +111,7 @@ func TestWatcherInitialization(t *testing.T) { } func TestWatcherInitializationShortID(t *testing.T) { - watcher := runWatcherShortID(t, true, + watcher := runAndWait(testWatcherShortID(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -128,7 +130,9 @@ func TestWatcherInitializationShortID(t *testing.T) { }, }, }, - nil, true) + nil, + true, + )) assert.Equal(t, map[string]*Container{ "1234567890123": &Container{ @@ -154,7 +158,7 @@ func TestWatcherInitializationShortID(t *testing.T) { } func TestWatcherAddEvents(t *testing.T) { - watcher := runWatcher(t, true, + watcher := runAndWait(testWatcher(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -188,7 +192,7 @@ func TestWatcherAddEvents(t *testing.T) { }, }, }, - ) + )) assert.Equal(t, map[string]*Container{ "0332dbd79e20": &Container{ @@ -207,7 +211,7 @@ func TestWatcherAddEvents(t *testing.T) { } func TestWatcherAddEventsShortID(t *testing.T) { - watcher := runWatcherShortID(t, true, + watcher := runAndWait(testWatcherShortID(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -242,7 +246,7 @@ func TestWatcherAddEventsShortID(t *testing.T) { }, }, true, - ) + )) assert.Equal(t, map[string]*Container{ "1234567890123": &Container{ @@ -261,7 +265,7 @@ func TestWatcherAddEventsShortID(t *testing.T) { } func TestWatcherUpdateEvent(t *testing.T) { - watcher := runWatcher(t, true, + watcher := runAndWait(testWatcher(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -295,7 +299,7 @@ func TestWatcherUpdateEvent(t *testing.T) { }, }, }, - ) + )) assert.Equal(t, map[string]*Container{ "0332dbd79e20": &Container{ @@ -309,7 +313,7 @@ func TestWatcherUpdateEvent(t *testing.T) { } func TestWatcherUpdateEventShortID(t *testing.T) { - watcher := runWatcherShortID(t, true, + watcher := runAndWait(testWatcherShortID(t, true, [][]types.Container{ []types.Container{ types.Container{ @@ -344,7 +348,7 @@ func TestWatcherUpdateEventShortID(t *testing.T) { }, }, true, - ) + )) assert.Equal(t, map[string]*Container{ "1234567890123": &Container{ @@ -358,9 +362,7 @@ func TestWatcherUpdateEventShortID(t *testing.T) { } func TestWatcherDie(t *testing.T) { - t.Skip("flaky test: https://github.com/elastic/beats/issues/7906") - - watcher := runWatcher(t, false, + watcher, clientDone := testWatcher(t, false, [][]types.Container{ 
[]types.Container{ types.Container{ @@ -381,32 +383,37 @@ func TestWatcherDie(t *testing.T) { }, }, ) + + clock := newTestClock() + watcher.clock = clock + + stopListener := watcher.ListenStop() + + watcher.Start() defer watcher.Stop() // Check it doesn't get removed while we request meta for the container for i := 0; i < 18; i++ { watcher.Container("0332dbd79e20") - assert.Equal(t, 1, len(watcher.Containers())) - time.Sleep(50 * time.Millisecond) - } - - // Checks a max of 10s for the watcher containers to be updated - for i := 0; i < 100; i++ { - // Now it should get removed - time.Sleep(100 * time.Millisecond) - - if len(watcher.Containers()) == 0 { + clock.Sleep(watcher.cleanupTimeout / 2) + watcher.runCleanup() + if !assert.Equal(t, 1, len(watcher.Containers())) { break } } + // Wait to be sure that the delete event has been processed + <-clientDone + <-stopListener.Events() + + // Check that after the cleanup period the container is removed + clock.Sleep(watcher.cleanupTimeout + 1*time.Second) + watcher.runCleanup() assert.Equal(t, 0, len(watcher.Containers())) } func TestWatcherDieShortID(t *testing.T) { - t.Skip("flaky test: https://github.com/elastic/beats/issues/7906") - - watcher := runWatcherShortID(t, false, + watcher, clientDone := testWatcherShortID(t, false, [][]types.Container{ []types.Container{ types.Container{ @@ -428,33 +435,40 @@ func TestWatcherDieShortID(t *testing.T) { }, true, ) + + clock := newTestClock() + watcher.clock = clock + + stopListener := watcher.ListenStop() + + watcher.Start() defer watcher.Stop() // Check it doesn't get removed while we request meta for the container for i := 0; i < 18; i++ { watcher.Container("0332dbd79e20") - assert.Equal(t, 1, len(watcher.Containers())) - time.Sleep(50 * time.Millisecond) - } - - // Checks a max of 10s for the watcher containers to be updated - for i := 0; i < 100; i++ { - // Now it should get removed - time.Sleep(100 * time.Millisecond) - - if len(watcher.Containers()) == 0 { + clock.Sleep(watcher.cleanupTimeout / 2) + watcher.runCleanup() + if !assert.Equal(t, 1, len(watcher.Containers())) { break } } + // Wait to be sure that the delete event has been processed + <-clientDone + <-stopListener.Events() + + // Check that after the cleanup period the container is removed + clock.Sleep(watcher.cleanupTimeout + 1*time.Second) + watcher.runCleanup() assert.Equal(t, 0, len(watcher.Containers())) } -func runWatcher(t *testing.T, kill bool, containers [][]types.Container, events []interface{}) *watcher { - return runWatcherShortID(t, kill, containers, events, false) +func testWatcher(t *testing.T, kill bool, containers [][]types.Container, events []interface{}) (*watcher, chan interface{}) { + return testWatcherShortID(t, kill, containers, events, false) } -func runWatcherShortID(t *testing.T, kill bool, containers [][]types.Container, events []interface{}, enable bool) *watcher { +func testWatcherShortID(t *testing.T, kill bool, containers [][]types.Container, events []interface{}, enable bool) (*watcher, chan interface{}) { logp.TestingSetup() client := &MockClient{ @@ -472,16 +486,37 @@ func runWatcherShortID(t *testing.T, kill bool, containers [][]types.Container, t.Fatal("'watcher' was supposed to be pointer to the watcher structure") } - err = watcher.Start() - if err != nil { - t.Fatal(err) - } + return watcher, client.done +} - <-client.done - if kill { - watcher.Stop() - watcher.stopped.Wait() - } +func runAndWait(w *watcher, done chan interface{}) *watcher { + w.Start() + <-done + w.Stop() + return w +} + 
+type testClock struct { + sync.Mutex + + now time.Time +} + +func newTestClock() *testClock { + return &testClock{now: time.Time{}} +} + +func (c *testClock) Now() time.Time { + c.Lock() + defer c.Unlock() + + c.now = c.now.Add(1) + return c.now +} + +func (c *testClock) Sleep(d time.Duration) { + c.Lock() + defer c.Unlock() - return watcher + c.now = c.now.Add(d) } From f2a1ba304a9074515ef1d6f618813f6dbb7f7011 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Fri, 16 Oct 2020 18:00:03 -0400 Subject: [PATCH 17/93] [libbeat] Fix potential deadlock in the disk queue + add more unit tests (#21930) --- .../publisher/queue/diskqueue/core_loop.go | 27 +- .../queue/diskqueue/core_loop_test.go | 370 +++++++++++++++++- .../publisher/queue/diskqueue/reader_loop.go | 8 +- 3 files changed, 400 insertions(+), 5 deletions(-) diff --git a/libbeat/publisher/queue/diskqueue/core_loop.go b/libbeat/publisher/queue/diskqueue/core_loop.go index 638d9da2f40..77f4aadb47f 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop.go +++ b/libbeat/publisher/queue/diskqueue/core_loop.go @@ -169,8 +169,16 @@ func (dq *diskQueue) handleReaderLoopResponse(response readerLoopResponse) { // A segment in the writing list can't be finished writing, // so we don't check the endOffset. segment = dq.segments.writing[0] + if response.err != nil { + // Errors reading a writing segment are awkward since we can't discard + // them until the writer loop is done with them. Instead we just seek + // to the end of the current data region. If we're lucky this lets us + // skip the intervening errors; if not, the segment will be cleaned up + // after the writer loop is done with it. + dq.segments.nextReadOffset = segment.endOffset + } } - segment.framesRead = uint64(dq.segments.nextReadFrameID - segment.firstFrameID) + segment.framesRead += response.frameCount // If there was an error, report it. if response.err != nil { @@ -346,6 +354,16 @@ func (dq *diskQueue) maybeReadPending() { // A read request is already pending return } + // Check if the next reading segment has already been completely read. (This + // can happen if it was being written and read simultaneously.) In this case + // we should move it to the acking list and proceed to the next segment. + if len(dq.segments.reading) > 0 && + dq.segments.nextReadOffset >= dq.segments.reading[0].endOffset { + dq.segments.acking = append(dq.segments.acking, dq.segments.reading[0]) + dq.segments.reading = dq.segments.reading[1:] + dq.segments.nextReadOffset = 0 + } + // Get the next available segment from the reading or writing lists. segment := dq.segments.readingSegment() if segment == nil || dq.segments.nextReadOffset >= segmentOffset(segment.endOffset) { @@ -353,7 +371,12 @@ func (dq *diskQueue) maybeReadPending() { return } if dq.segments.nextReadOffset == 0 { - // If we're reading the beginning of this segment, assign its firstFrameID. + // If we're reading the beginning of this segment, assign its firstFrameID + // so we can recognize its acked frames later. + // The first segment we read might not have its initial nextReadOffset + // set to 0 if the segment was already partially read on a previous run. + // However that can only happen when nextReadFrameID == 0, so we don't + // need to do anything in that case. 
segment.firstFrameID = dq.segments.nextReadFrameID } request := readerLoopRequest{ diff --git a/libbeat/publisher/queue/diskqueue/core_loop_test.go b/libbeat/publisher/queue/diskqueue/core_loop_test.go index b5f0d301d15..309a145968d 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop_test.go +++ b/libbeat/publisher/queue/diskqueue/core_loop_test.go @@ -17,7 +17,12 @@ package diskqueue -import "testing" +import ( + "fmt" + "testing" + + "github.com/elastic/beats/v7/libbeat/logp" +) func TestProducerWriteRequest(t *testing.T) { dq := &diskQueue{settings: DefaultSettings()} @@ -92,3 +97,366 @@ func TestHandleWriterLoopResponse(t *testing.T) { dq.segments.writing[0].endOffset) } } + +func TestHandleReaderLoopResponse(t *testing.T) { + // handleReaderLoopResponse should: + // - advance segments.{nextReadFrameID, nextReadOffset} by the values in + // response.{frameCount, byteCount} + // - advance the target segment's framesRead field by response.frameCount + // - if reading[0] encountered an error or was completely read, move it from + // the reading list to the acking list and reset nextReadOffset to zero + // - if writing[0] encountered an error, advance nextReadOffset to the + // segment's current endOffset (we can't discard the active writing + // segment like we do for errors in the reading list, but we can still + // mark the remaining data as processed) + + testCases := map[string]struct { + // The segment structure to start with before calling maybeReadPending + segments diskQueueSegments + response readerLoopResponse + + expectedFrameID frameID + expectedOffset segmentOffset + expectedACKingSegment *segmentID + }{ + "completely read first reading segment": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 10, + byteCount: 1000, + }, + expectedFrameID: 15, + expectedOffset: 0, + expectedACKingSegment: segmentIDRef(1), + }, + "read first half of first reading segment": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 5, + byteCount: 500, + }, + expectedFrameID: 10, + expectedOffset: 500, + }, + "read second half of first reading segment": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + nextReadOffset: 500, + }, + response: readerLoopResponse{ + frameCount: 5, + byteCount: 500, + }, + expectedFrameID: 10, + expectedOffset: 0, + expectedACKingSegment: segmentIDRef(1), + }, + "read of first reading segment aborted by error": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 1, + byteCount: 100, + err: fmt.Errorf("something bad happened"), + }, + expectedFrameID: 6, + expectedOffset: 0, + expectedACKingSegment: segmentIDRef(1), + }, + "completely read first writing segment": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 10, + byteCount: 1000, + }, + expectedFrameID: 15, + expectedOffset: 1000, + }, + "read first half of first writing segment": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 5, + byteCount: 500, + }, + expectedFrameID: 
10, + expectedOffset: 500, + }, + "read second half of first writing segment": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadOffset: 500, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 5, + byteCount: 500, + }, + expectedFrameID: 10, + expectedOffset: 1000, + }, + "error reading a writing segments skips remaining data": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadFrameID: 5, + }, + response: readerLoopResponse{ + frameCount: 1, + byteCount: 100, + err: fmt.Errorf("something bad happened"), + }, + expectedFrameID: 6, + expectedOffset: 1000, + }, + } + + for description, test := range testCases { + dq := &diskQueue{ + logger: logp.L(), + settings: DefaultSettings(), + segments: test.segments, + } + dq.handleReaderLoopResponse(test.response) + + if dq.segments.nextReadFrameID != test.expectedFrameID { + t.Errorf("%s: expected nextReadFrameID = %d, got %d", + description, test.expectedFrameID, dq.segments.nextReadFrameID) + } + if dq.segments.nextReadOffset != test.expectedOffset { + t.Errorf("%s: expected nextReadOffset = %d, got %d", + description, test.expectedOffset, dq.segments.nextReadOffset) + } + if test.expectedACKingSegment != nil { + if len(dq.segments.acking) == 0 { + t.Errorf("%s: expected acking segment %d, got none", + description, *test.expectedACKingSegment) + } else if dq.segments.acking[0].id != *test.expectedACKingSegment { + t.Errorf("%s: expected acking segment %d, got %d", + description, *test.expectedACKingSegment, dq.segments.acking[0].id) + } + } else if len(dq.segments.acking) != 0 { + t.Errorf("%s: expected no acking segment, got %v", + description, *dq.segments.acking[0]) + } + } +} + +func TestMaybeReadPending(t *testing.T) { + // maybeReadPending should: + // - If any unread data is available in a reading or writing segment, + // send a readerLoopRequest for the full amount available in the + // first such segment. + // - When creating a readerLoopRequest that includes the beginning of + // a segment (startOffset == 0), set that segment's firstFrameID + // to segments.nextReadFrameID (so ACKs based on frame ID can be linked + // back to the segment that generated them). + // - If the first reading segment has already been completely read (which + // can happen if it was read while still in the writing list), move it to + // the acking list and set segments.nextReadOffset to 0. + + testCases := map[string]struct { + // The segment structure to start with before calling maybeReadPending + segments diskQueueSegments + // The request we expect to see on the reader loop's request channel, + // or nil if there should be none. + expectedRequest *readerLoopRequest + // The segment ID we expect to see in the acking list, or nil for none. 
+ expectedACKingSegment *segmentID + }{ + "read one full segment": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + // The next read request should start with frame 5 + nextReadFrameID: 5, + }, + expectedRequest: &readerLoopRequest{ + segment: &queueSegment{id: 1}, + startFrameID: 5, + startOffset: 0, + endOffset: 1000, + }, + }, + "read the end of a segment": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + // The next read request should start with frame 5 + nextReadFrameID: 5, + // Start reading at position 500 + nextReadOffset: 500, + }, + expectedRequest: &readerLoopRequest{ + segment: &queueSegment{id: 1}, + startFrameID: 5, + // Should be reading from nextReadOffset (500) to the end of + // the segment (1000). + startOffset: 500, + endOffset: 1000, + }, + }, + "ignore writing segments if reading is available": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + writing: []*queueSegment{ + {id: 2, endOffset: 1000}, + }, + }, + expectedRequest: &readerLoopRequest{ + segment: &queueSegment{id: 1}, + startOffset: 0, + endOffset: 1000, + }, + }, + "do nothing if no segments are available": { + segments: diskQueueSegments{}, + expectedRequest: nil, + }, + "read the writing segment if no reading segments are available": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 2, endOffset: 1000}, + }, + nextReadOffset: 500, + }, + expectedRequest: &readerLoopRequest{ + segment: &queueSegment{id: 2}, + startOffset: 500, + endOffset: 1000, + }, + }, + "do nothing if the writing segment has already been fully read": { + segments: diskQueueSegments{ + writing: []*queueSegment{ + {id: 2, endOffset: 1000}, + }, + nextReadOffset: 1000, + }, + expectedRequest: nil, + }, + "skip the first reading segment if it's already been fully read": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + {id: 2, endOffset: 500}, + }, + nextReadOffset: 1000, + }, + expectedRequest: &readerLoopRequest{ + segment: &queueSegment{id: 2}, + startOffset: 0, + endOffset: 500, + }, + expectedACKingSegment: segmentIDRef(1), + }, + "move empty reading segment to the acking list if it's the only one": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + nextReadOffset: 1000, + }, + expectedRequest: nil, + expectedACKingSegment: segmentIDRef(1), + }, + } + + for description, test := range testCases { + dq := &diskQueue{ + settings: DefaultSettings(), + segments: test.segments, + readerLoop: &readerLoop{ + requestChan: make(chan readerLoopRequest, 1), + }, + } + firstFrameID := test.segments.nextReadFrameID + dq.maybeReadPending() + select { + case request := <-dq.readerLoop.requestChan: + if test.expectedRequest == nil { + t.Errorf("%s: expected no read request, got %v", + description, request) + break + } + if !equalReaderLoopRequests(request, *test.expectedRequest) { + t.Errorf("%s: expected request %v, got %v", + description, *test.expectedRequest, request) + } + if request.startOffset == 0 && + request.segment.firstFrameID != firstFrameID { + t.Errorf( + "%s: maybeReadPending should update firstFrameID", description) + } + default: + if test.expectedRequest != nil { + t.Errorf("%s: expected read request %v, got none", + description, test.expectedRequest) + } + } + if test.expectedACKingSegment != nil { + if len(dq.segments.acking) != 1 { + t.Errorf("%s: expected acking segment %v, got none", + 
description, *test.expectedACKingSegment) + } else if dq.segments.acking[0].id != *test.expectedACKingSegment { + t.Errorf("%s: expected acking segment %v, got %v", + description, *test.expectedACKingSegment, dq.segments.acking[0].id) + } + if dq.segments.nextReadOffset != 0 { + t.Errorf("%s: expected read offset 0 after acking segment, got %v", + description, dq.segments.nextReadOffset) + } + } else if len(dq.segments.acking) != 0 { + t.Errorf("%s: expected no acking segment, got %v", + description, *dq.segments.acking[0]) + } + } +} + +func segmentIDRef(id segmentID) *segmentID { + return &id +} + +func equalReaderLoopRequests( + r0 readerLoopRequest, r1 readerLoopRequest, +) bool { + // We compare segment ids rather than segment pointers because it's + // awkward to include the same pointer repeatedly in the test definition. + return r0.startOffset == r1.startOffset && + r0.endOffset == r1.endOffset && + r0.segment.id == r1.segment.id && + r0.startFrameID == r1.startFrameID +} diff --git a/libbeat/publisher/queue/diskqueue/reader_loop.go b/libbeat/publisher/queue/diskqueue/reader_loop.go index dc2bb95777f..5b30f03e81d 100644 --- a/libbeat/publisher/queue/diskqueue/reader_loop.go +++ b/libbeat/publisher/queue/diskqueue/reader_loop.go @@ -35,6 +35,8 @@ type readerLoopResponse struct { frameCount uint64 // The number of bytes successfully read from the requested segment file. + // If this is less than (endOffset - startOffset) from the original request, + // then err is guaranteed to be non-nil. byteCount uint64 // If there was an error in the segment file (i.e. inconsistent data), the @@ -100,7 +102,8 @@ func (rl *readerLoop) processRequest(request readerLoopRequest) readerLoopRespon return readerLoopResponse{err: err} } defer handle.Close() - _, err = handle.Seek(segmentHeaderSize+int64(request.startOffset), 0) + _, err = handle.Seek( + segmentHeaderSize+int64(request.startOffset), os.SEEK_SET) if err != nil { return readerLoopResponse{err: err} } @@ -137,7 +140,7 @@ func (rl *readerLoop) processRequest(request readerLoopRequest) readerLoopRespon } // We are done with this request if: - // - there was an error reading the frame, + // - there was an error reading the frame // - there are no more frames to read, or // - we have reached the end of the requested region if err != nil || frame == nil || byteCount >= targetLength { @@ -166,6 +169,7 @@ func (rl *readerLoop) processRequest(request readerLoopRequest) readerLoopRespon // nextFrame reads and decodes one frame from the given file handle, as long // it does not exceed the given length bound. The returned frame leaves the // segment and frame IDs unset. +// The returned error will be set if and only if the returned frame is nil. func (rl *readerLoop) nextFrame( handle *os.File, maxLength uint64, ) (*readFrame, error) { From 7c7261054383c87014286df6249aa95cf4ea6a21 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Fri, 16 Oct 2020 15:15:28 -0700 Subject: [PATCH 18/93] Add 7.7.1 relnotes to 7.8 docs (#21937) (#21941) * Add 7.7.1 changelog * Fix 15838 issue placement in CHANGELOG (#19105) Fix for https://github.com/elastic/beats/issues/15838 has first arrived in 7.6.1, not 7.5.0. 
Verification: https://github.com/elastic/beats/compare/v7.6.0...v7.6.1 * Add relnotes link Co-authored-by: Grzegorz Banasiak Co-authored-by: Grzegorz Banasiak --- CHANGELOG.asciidoc | 39 ++++++++++++++++++++++++++++++++++- libbeat/docs/release.asciidoc | 1 + 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index c4d0f48005f..1dfbb2fb889 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -457,6 +457,38 @@ https://github.com/elastic/beats/compare/v7.7.0...v7.8.0[View commits] - Add support for event IDs 4673,4674,4697,4698,4699,4700,4701,4702,4768,4769,4770,4771,4776,4778,4779,4964 to the Security module. {pull}17517[17517] - Add registry and code signature information and ECS categorization fields for sysmon module. {pull}18058[18058] +[[release-notes-7.7.1]] +=== Beats version 7.7.1 +https://github.com/elastic/beats/compare/v7.7.0...v7.7.1[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix `keystore add` command hanging on Windows. {issue}18649[18649] {pull}18654[18654] + +*Filebeat* + +- Unescape filenames in SQS messages to resolve file paths correctly. {pull}18370[18370] +- Improve failure handler for Cisco ASA and FTD pipelines to avoid mapping temporary fields. {issue}18391[18391] {pull}18392[18392] +- Fix `source.address` field not being set for the Nginx `ingress_controller` fileset. {pull}18511[18511] +- Fix Google Cloud `audit` fileset to only take in fields that are explicitly defined by the fileset. {issue}18465[18465] {pull}18472[18472] +- Fix rate limit related issue in the `httpjson` input for the Okta module. {issue}18530[18530] {pull}18534[18534] +- Fix Cisco ASA and FTD parsing errors caused by NAT fields that contain a hostname instead of an IP. {issue}14034[14034] {pull}18376[18376] +- Fix PANW module to use correct mappings for bytes and packets counters. {issue}18522[18522] {pull}18525[18525] +- Fix Office 365 ingest failures caused by IP addresses surrounded by square brackets. {issue}18587[18587] {pull}18591[18591] + +*Metricbeat* + +- Fix `tags_filter` setting to work correctly for the AWS `cloudwatch` metricset. {pull}18524[18524] + +==== Added + +*Filebeat* + +- Add support for Google Application Default Credentials to the Google Pub/Sub input and Google Cloud modules. {pull}15668[15668] +- Make `decode_cef` processor GA. {pull}17944[17944] + [[release-notes-7.7.0]] === Beats version 7.7.0 https://github.com/elastic/beats/compare/v7.6.2...v7.7.0[View commits] @@ -729,6 +761,12 @@ https://github.com/elastic/beats/compare/v7.6.0...v7.6.1[View commits] - Fix timeout option of GCP functions. {issue}16282[16282] {pull}16287[16287] +==== Added + +*Winlogbeat* + +- Made the event parser more lenient w.r.t. invalid event log definition version numbers. {issue}15838[15838] + [[release-notes-7.6.0]] === Beats version 7.6.0 https://github.com/elastic/beats/compare/v7.5.1...v7.6.0[View commits] @@ -1101,7 +1139,6 @@ processing events. (CVE-2019-17596) See https://www.elastic.co/community/securit - Fill `event.provider`. {pull}13937[13937] - Add support for user management events to the Security module. {pull}13530[13530] -- Made the event parser more lenient w.r.t. invalid event log definition version numbers. {issue}15838[15838] ==== Deprecated diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 24e0ee43651..90dd214787a 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -13,6 +13,7 @@ upgrade. 
* <> * <> * <> +* <> * <> * <> * <> From eeee0008b6f4d815b4ae54c88061db1bc00f4111 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Fri, 16 Oct 2020 16:36:59 -0700 Subject: [PATCH 19/93] Apply name changes to elastic agent docs (#21549) * Apply name changes to elastic agent docs * Temporarily comment out image * Remove reviewer notes --- .../docs/elastic-agent-command-line.asciidoc | 18 +++++++++--------- ...lastic-agent-configuration-example.asciidoc | 6 +++--- .../docs/elastic-agent-configuration.asciidoc | 2 +- .../docs/run-elastic-agent.asciidoc | 14 +++++++------- .../docs/running-on-kubernetes.asciidoc | 2 +- .../docs/unenroll-elastic-agent.asciidoc | 2 +- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/x-pack/elastic-agent/docs/elastic-agent-command-line.asciidoc b/x-pack/elastic-agent/docs/elastic-agent-command-line.asciidoc index e102d5b4787..49ddbdce466 100644 --- a/x-pack/elastic-agent/docs/elastic-agent-command-line.asciidoc +++ b/x-pack/elastic-agent/docs/elastic-agent-command-line.asciidoc @@ -46,10 +46,10 @@ elastic-agent enroll [--ca-sha256 ] === Options `kibana_url`:: -Required. URL of the {kib} endpoint where {ingest-manager} is running. +Required. URL of the {kib} endpoint where {fleet} is running. `enrollment_token`:: -Required. Enrollment token generated by {ingest-manager}. You can use the same +Required. Enrollment token generated by {fleet}. You can use the same enrollment token for multiple agents. `--ca-sha256 `:: @@ -60,7 +60,7 @@ verification. Comma-separated list of root certificates used for server verification. `--force`:: -Force overwrite of current configuration without prompting for confirmation. +Force overwrite of current policy without prompting for confirmation. This flag is helpful when using automation software or scripted deployments. `--help`:: @@ -125,9 +125,9 @@ elastic-agent help enroll [[elastic-agent-inspect-command]] == elastic-agent inspect -Show the current {agent} configuration. +Show the current {agent} policy. -If no parameters are specified, shows the full {agent} configuration. +If no parameters are specified, shows the full {agent} policy. [discrete] === Synopsis @@ -145,7 +145,7 @@ elastic-agent inspect output [--output ] [--program ] [discrete] === Options -`output`:: Display the current configuration for the output. This command +`output`:: Display the current policy for the output. This command accepts additional flags: + -- @@ -197,7 +197,7 @@ elastic-agent run [global-flags] These flags are valid whenever you run `elastic-agent` on the command line. `-c `:: -The configuration file to use. If not specified, {agent} uses +The policy file to use. If not specified, {agent} uses `{path.home}/elastic-agent.yml`. `--e`:: @@ -209,7 +209,7 @@ The environment in which the agent will run. //TODO: Clarify what we mean by environment by showing an example. `--path.config `:: -The directory where {agent} looks for its configuration file. The default +The directory where {agent} looks for its policy file. The default varies by platform. `--path.data `:: @@ -220,7 +220,7 @@ If not specified, {agent} uses `{path.home}/data`. `--path.home `:: The home directory of {agent}. `path.home` determines the location of the -configuration files and data directory. +policy files and data directory. + If not specified, {agent} uses the current working directory. 
diff --git a/x-pack/elastic-agent/docs/elastic-agent-configuration-example.asciidoc b/x-pack/elastic-agent/docs/elastic-agent-configuration-example.asciidoc index b5f0ed0aef6..cd4747b268e 100644 --- a/x-pack/elastic-agent/docs/elastic-agent-configuration-example.asciidoc +++ b/x-pack/elastic-agent/docs/elastic-agent-configuration-example.asciidoc @@ -1,10 +1,10 @@ -[[elastic-agent-configuration-example]] +[[elastic-agent-policy-example]] [role="xpack"] -= Configuration example += Policy example beta[] -The following example shows a full list of configuration options: +The following example shows a full list of policy options: [source,yaml] ---- diff --git a/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc b/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc index d72c572370c..98ba4a9b424 100644 --- a/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc +++ b/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc @@ -18,7 +18,7 @@ and send the logs and metrics to the same {es} instance. To alter this behavior, configure the output and other configuration settings. When running the agent standalone, specify configuration settings in the `elastic-agent.yml` file. When using {fleet}, do not modify settings in -the `elastic-agent.yml` file. Instead, use {ingest-manager} in {kib} to change +the `elastic-agent.yml` file. Instead, use {fleet} in {kib} to change settings. TIP: To get started quickly, you can use {fleet} to generate a standalone diff --git a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc index 7c48084b8fb..34bb2481f7f 100644 --- a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc +++ b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc @@ -12,8 +12,8 @@ configure and manage the agent. == Run in {fleet} mode With _fleet mode_, you manage {agent} remotely. The agent uses a trusted {kib} -instance to retrieve configurations and report agent events. This trusted {kib} -instance must have {ingest-manager} and {fleet} enabled. +instance to retrieve policies and report agent events. This trusted {kib} +instance must have {fleet} enabled. To create a trusted communication channel between {agent} and {kib}, enroll the agent to {fleet}. @@ -22,14 +22,14 @@ To enroll an {agent} to {fleet}: . Stop {agent}, if it's already running. -. In {ingest-manager}, click **Settings** and change the defaults, if necessary. +. In {fleet}, click **Settings** and change the defaults, if necessary. For self-managed installations, set the URLs for {es} and {kib}, including the http ports, then save your changes. + [role="screenshot"] -image::images/kibana-ingest-manager-settings.png[{ingest-manager} settings] +//image::images/kibana-fleet-settings.png[{fleet} settings] -. Select **{fleet}**, then click **Add agent** to get an enrollment token. See +. Select **Agents**, then click **Add agent** to get an enrollment token. See <> for detailed steps. . Change to the directory where {agent} is installed, and enroll the agent to @@ -60,8 +60,8 @@ To start {agent} manually, run: include::{beats-repo-dir}/x-pack/elastic-agent/docs/tab-widgets/run-standalone-widget.asciidoc[] -Use the `-c` flag to specify the configuration file. If no configuration file is -specified, {agent} uses the default configuration, `elastic-agent.yml`, which is +Use the `-c` flag to specify the policy file. 
If no policy file is +specified, {agent} uses the default policy, `elastic-agent.yml`, which is located in the same directory as {agent}. For configuration options, see <>. diff --git a/x-pack/elastic-agent/docs/running-on-kubernetes.asciidoc b/x-pack/elastic-agent/docs/running-on-kubernetes.asciidoc index 19b4628fde9..fc211baabac 100644 --- a/x-pack/elastic-agent/docs/running-on-kubernetes.asciidoc +++ b/x-pack/elastic-agent/docs/running-on-kubernetes.asciidoc @@ -44,7 +44,7 @@ curl -L -O https://raw.githubusercontent.com/elastic/beats/{branch}/deploy/kuber By default, {agent} is enrolled to an existing Kibana deployment, if present using the specified credentials. FLEET_ENROLLMENT_TOKEN parameter is used to connect Agent to the -corresponding Ingest Management configuration. It is suggested to connect Daemonset Agents to a node scope configuration +corresponding {agent} policy. It is suggested to connect Daemonset Agents to a node scope configuration and Deployment Agent to a cluster scope configuration. Then Kubernetes package will be deployed enabling cluster scope datasets using cluster scope configuration while node scope datasets will be enabled under node scope configuration. diff --git a/x-pack/elastic-agent/docs/unenroll-elastic-agent.asciidoc b/x-pack/elastic-agent/docs/unenroll-elastic-agent.asciidoc index cd77fc3dde3..78c7fab9cf9 100644 --- a/x-pack/elastic-agent/docs/unenroll-elastic-agent.asciidoc +++ b/x-pack/elastic-agent/docs/unenroll-elastic-agent.asciidoc @@ -4,7 +4,7 @@ You can unenroll an agent to invalidate the API key used to connect to {es}. -. In {ingest-manager}, select **{fleet}**. +. In {fleet}, select **Agents**. . Under Agents, choose **Unenroll** from the **Actions** menu next to the agent you want to unenroll. From 9dc2f8c3e873a62b4d0aaac5abc63633c61fa56a Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Oct 2020 11:17:33 +0300 Subject: [PATCH 20/93] Kubernetes leaderelection improvements (#21896) --- libbeat/autodiscover/providers/kubernetes/kubernetes.go | 8 ++++++-- libbeat/common/kubernetes/util.go | 6 +++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go index 190c646ef0c..e0c5dd103c0 100644 --- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -249,9 +249,13 @@ func NewLeaderElectionManager( } else { id = "beats-leader-" + uuid.String() } + ns, err := kubernetes.InClusterNamespace() + if err != nil { + ns = "default" + } lease := metav1.ObjectMeta{ Name: cfg.LeaderLease, - Namespace: "default", + Namespace: ns, } metaUID := lease.GetObjectMeta().GetUID() lem.leaderElection = leaderelection.LeaderElectionConfig{ @@ -262,7 +266,7 @@ func NewLeaderElectionManager( Identity: id, }, }, - ReleaseOnCancel: true, + ReleaseOnCancel: false, LeaseDuration: 15 * time.Second, RenewDeadline: 10 * time.Second, RetryPeriod: 2 * time.Second, diff --git a/libbeat/common/kubernetes/util.go b/libbeat/common/kubernetes/util.go index ff60a7fa591..a92c81e6d21 100644 --- a/libbeat/common/kubernetes/util.go +++ b/libbeat/common/kubernetes/util.go @@ -101,7 +101,7 @@ func DiscoverKubernetesNode(log *logp.Logger, host string, inCluster bool, clien } ctx := context.TODO() if inCluster { - ns, err := inClusterNamespace() + ns, err := InClusterNamespace() if err != nil { log.Errorf("kubernetes: Couldn't get namespace when beat is in cluster with error: %+v", err.Error()) 
return defaultNode @@ -158,9 +158,9 @@ func machineID() string { return "" } -// inClusterNamespace gets namespace from serviceaccount when beat is in cluster. +// InClusterNamespace gets namespace from serviceaccount when beat is in cluster. // code borrowed from client-go with some changes. -func inClusterNamespace() (string, error) { +func InClusterNamespace() (string, error) { // get namespace associated with the service account token, if available data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err != nil { From 12e77167fceb6015595a885a8bf8280cba51248d Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Oct 2020 12:31:23 +0300 Subject: [PATCH 21/93] Update docs.asciidoc (#21849) --- x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc index 4f7aa03a9ef..87a15d72a94 100644 --- a/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/proxy/_meta/docs.asciidoc @@ -21,7 +21,7 @@ them. Here is an example configuration that can be used for that purpose: metricbeat.autodiscover: providers: - type: kubernetes - include_annotations: ["prometheus.io.scrape"] + node: ${NODE_NAME} templates: - condition: contains: From 78856ca0404d7abb4d703a200ecefbbb2d436640 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 19 Oct 2020 10:37:45 +0100 Subject: [PATCH 22/93] [CI] Use google storage to keep artifacts (#21910) --- Jenkinsfile | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 52c579ab7f5..4099e820f97 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -384,7 +384,7 @@ def archiveTestOutput(Map args = [:]) { script: 'rm -rf ve || true; find . -type d -name vendor -exec rm -r {} \\;') } else { log(level: 'INFO', text: 'Delete folders that are causing exceptions (See JENKINS-58421) is disabled for Windows.') } junitAndStore(allowEmptyResults: true, keepLongStdio: true, testResults: args.testResults, stashedTestReports: stashedTestReports, id: args.id) - tar(file: "test-build-artifacts-${args.id}.tgz", dir: '.', archive: true, allowMissing: true) + tarAndUploadArtifacts(file: "test-build-artifacts-${args.id}.tgz", location: '.') } catchError(buildResult: 'SUCCESS', message: 'Failed to archive the build test results', stageResult: 'SUCCESS') { def folder = cmd(label: 'Find system-tests', returnStdout: true, script: 'python .ci/scripts/search_system_tests.py').trim() @@ -393,12 +393,25 @@ def archiveTestOutput(Map args = [:]) { // TODO: nodeOS() should support ARM def os_suffix = isArm() ? 
'linux' : nodeOS() def name = folder.replaceAll('/', '-').replaceAll('\\\\', '-').replaceAll('build', '').replaceAll('^-', '') + '-' + os_suffix - tar(file: "${name}.tgz", archive: true, dir: folder) + tarAndUploadArtifacts(file: "${name}.tgz", location: folder) } } } } +/** +* Wrapper to tar and upload artifacts to Google Storage to avoid killing the +* disk space of the jenkins instance +*/ +def tarAndUploadArtifacts(Map args = [:]) { + tar(file: args.file, dir: args.location, archive: false, allowMissing: true) + googleStorageUpload(bucket: "gs://${JOB_GCS_BUCKET}/${env.JOB_NAME}-${env.BUILD_ID}", + credentialsId: "${JOB_GCS_CREDENTIALS}", + pattern: "${args.file}", + sharedPublicly: true, + showInline: true) +} + /** * This method executes a closure with credentials for cloud test * environments. From ee7d3298eaa6cab43ab708bce88e86af8bfb67d0 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Mon, 19 Oct 2020 14:12:50 +0200 Subject: [PATCH 23/93] [Ingest Manager] Prevent reporting ecs version twice (#21616) [Ingest Manager] Prevent reporting ecs version twice (#21616) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index d01c8a1c7bf..64d1a3b589b 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -15,6 +15,7 @@ - Copy Action store on upgrade {pull}21298[21298] - Include inputs in action store actions {pull}21298[21298] - Fix issue where inputs without processors defined would panic {pull}21628[21628] +- Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] diff --git a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go index 53b9f377fcd..2b4617bc2bd 100644 --- a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go +++ b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go @@ -113,7 +113,6 @@ func (b *Monitor) EnrichArgs(process, pipelineID string, args []string, isSideca logFile = fmt.Sprintf("%s-json.log", logFile) appendix = append(appendix, "-E", "logging.json=true", - "-E", "logging.ecs=true", "-E", "logging.files.path="+loggingPath, "-E", "logging.files.name="+logFile, "-E", "logging.files.keepfiles=7", From 3874725d1e9c645a549b270be800a266bb2b3021 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 19 Oct 2020 09:16:52 -0400 Subject: [PATCH 24/93] [Elastic Agent] Fix index for Agent monitoring to to elastic_agent. (#21932) * Change to elastic_agent. * Add changelog. 
--- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/operation/monitoring.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 64d1a3b589b..5284da8db2b 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -18,6 +18,7 @@ - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index c4d895eb6ee..74d542d58e9 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -186,14 +186,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "paths": []string{ filepath.Join(paths.Home(), "logs", "elastic-agent-json.log"), }, - "index": "logs-elastic.agent-default", + "index": "logs-elastic_agent-default", "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": "elastic.agent", + "dataset": "elastic_agent", "namespace": "default", }, }, @@ -202,7 +202,7 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": "elastic.agent", + "dataset": "elastic_agent", }, }, }, @@ -220,14 +220,14 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "message_key": "message", }, "paths": paths, - "index": fmt.Sprintf("logs-elastic.agent.%s-default", name), + "index": fmt.Sprintf("logs-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "logs", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), "namespace": "default", }, }, @@ -236,7 +236,7 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), }, }, }, @@ -270,14 +270,14 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "metricsets": []string{"stats", "state"}, "period": "10s", "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic.agent.%s-default", name), + "index": fmt.Sprintf("metrics-elastic_agent.%s-default", name), "processors": []map[string]interface{}{ { "add_fields": map[string]interface{}{ "target": "data_stream", "fields": map[string]interface{}{ "type": "metrics", - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", name), "namespace": "default", }, }, @@ -286,7 +286,7 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string "add_fields": map[string]interface{}{ "target": "event", "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic.agent.%s", name), + "dataset": fmt.Sprintf("elastic_agent.%s", 
name), }, }, }, From 803ddcada71fcb0ccb398a7f64db38836bb9f472 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 19 Oct 2020 10:13:10 -0400 Subject: [PATCH 25/93] [Elastic Agent] Fix named pipe communication on Windows 7 (#21931) * Fix named pipes on Windows 7. * Add changelog fix notice. --- NOTICE.txt | 6 +++--- go.mod | 1 + go.sum | 4 ++-- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 349fe58b3d1..477f0b53201 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2183,12 +2183,12 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- -Dependency : github.com/Microsoft/go-winio -Version: v0.4.15-0.20190919025122-fc70bd9a86b5 +Dependency : github.com/bi-zone/go-winio +Version: v0.4.15 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!microsoft/go-winio@v0.4.15-0.20190919025122-fc70bd9a86b5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/bi-zone/go-winio@v0.4.15/LICENSE: The MIT License (MIT) diff --git a/go.mod b/go.mod index 720690f1f2f..2ef65606319 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( replace ( github.com/Azure/go-autorest => github.com/Azure/go-autorest v12.2.0+incompatible + github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec github.com/cucumber/godog => github.com/cucumber/godog v0.8.1 github.com/docker/docker => github.com/docker/engine v0.0.0-20191113042239-ea84732a7725 diff --git a/go.sum b/go.sum index 5c01c612fe3..97f31d79292 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,6 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -132,6 +130,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bi-zone/go-winio v0.4.15 h1:viLHm+U7bzIkfVHuWgc3Wp/sT5zaLoRG7XdOEy1b12w= +github.com/bi-zone/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa h1:aXHPZwx8Y5z8r+1WPylnu095usTf6QSshaHs6nVMBc0= github.com/blakerouse/service v1.1.1-0.20200924160513-057808572ffa/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= 
github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 5284da8db2b..fa0198a6628 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -19,6 +19,7 @@ - Partial extracted beat result in failure to spawn beat {issue}21718[21718] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] +- Fix issue with named pipes on Windows 7 {pull}21931[21931] ==== New features From b2d1929bff02393360cc0292975b82da448151c3 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 19 Oct 2020 18:02:06 +0300 Subject: [PATCH 26/93] Stop storing stateless kubernetes keystores (#21880) --- CHANGELOG.next.asciidoc | 1 + .../k8skeystore/kubernetes_keystore.go | 20 +++++-------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 51255305f42..9f5d45e6a8e 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go index 616525b432a..e17b4258232 100644 --- a/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go +++ b/libbeat/common/kubernetes/k8skeystore/kubernetes_keystore.go @@ -30,14 +30,10 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -type KubernetesKeystores map[string]keystore.Keystore - -// KubernetesKeystoresRegistry holds KubernetesKeystores for known namespaces. Once a Keystore for one k8s namespace -// is initialized it will be reused every time it is needed. +// KubernetesKeystoresRegistry implements a Provider for Keystore. type KubernetesKeystoresRegistry struct { - kubernetesKeystores KubernetesKeystores - logger *logp.Logger - client k8s.Interface + logger *logp.Logger + client k8s.Interface } // KubernetesSecretsKeystore allows to retrieve passwords from Kubernetes secrets for a given namespace @@ -56,9 +52,8 @@ func Factoryk8s(keystoreNamespace string, ks8client k8s.Interface, logger *logp. 
// NewKubernetesKeystoresRegistry initializes a KubernetesKeystoresRegistry func NewKubernetesKeystoresRegistry(logger *logp.Logger, client k8s.Interface) keystore.Provider { return &KubernetesKeystoresRegistry{ - kubernetesKeystores: KubernetesKeystores{}, - logger: logger, - client: client, + logger: logger, + client: client, } } @@ -75,12 +70,7 @@ func (kr *KubernetesKeystoresRegistry) GetKeystore(event bus.Event) keystore.Key namespace = ns.(string) } if namespace != "" { - // either retrieve already stored keystore or create a new one for the namespace - if storedKeystore, ok := kr.kubernetesKeystores[namespace]; ok { - return storedKeystore - } k8sKeystore, _ := Factoryk8s(namespace, kr.client, kr.logger) - kr.kubernetesKeystores["namespace"] = k8sKeystore return k8sKeystore } kr.logger.Debugf("Cannot retrieve kubernetes namespace from event: %s", event) From e29c3fae4adbdd066e58ca0df00ed8ca24b74d0d Mon Sep 17 00:00:00 2001 From: Niels Hofmans Date: Mon, 19 Oct 2020 17:19:31 +0200 Subject: [PATCH 27/93] filebeat: add SSL options to checkpoint module (#19560) * feat(firewall): add tls config * feat(firewall): add vars to manifest * chore(checkpoint): add tls to example * chore(checkpoint): run mage fmt update * cleanup(checkpoint): remove obsolete log_level * refactor(checkpoint): move to .ssl * chore(x-pack): revert ide fix * chore(changelog): add f5 asm ref * revert(changelog): remove f5 asm mod * chore(changelog): add checkpoint tls * chore: fix lint warnings * Undo some changes and move docs to checkpoint * Move changelog entry Co-authored-by: Marc Guasch --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/modules/checkpoint.asciidoc | 12 ++++++++++++ .../filebeat/module/checkpoint/_meta/docs.asciidoc | 12 ++++++++++++ .../module/checkpoint/firewall/config/firewall.yml | 8 +++++++- .../filebeat/module/checkpoint/firewall/manifest.yml | 1 + 5 files changed, 33 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9f5d45e6a8e..fd297059639 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -631,6 +631,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - New juniper.srx dataset for Juniper SRX logs. {pull}20017[20017] - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] +- Add SSL option to checkpoint module {pull}19560[19560] *Heartbeat* diff --git a/filebeat/docs/modules/checkpoint.asciidoc b/filebeat/docs/modules/checkpoint.asciidoc index c4e453b452d..841e66fdbab 100644 --- a/filebeat/docs/modules/checkpoint.asciidoc +++ b/filebeat/docs/modules/checkpoint.asciidoc @@ -70,6 +70,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. 
+```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc index ecd8e0d3e81..385206f03ff 100644 --- a/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/checkpoint/_meta/docs.asciidoc @@ -65,6 +65,18 @@ A list of tags to include in events. Including `forwarded` indicates that the events did not originate on this host and causes `host.name` to not be added to events. Defaults to `[checkpoint-firewall, forwarded]`. +*`var.ssl`*:: + +The SSL/TLS configuration for the filebeat instance. This can be used to enforce mutual TLS. +```yaml +ssl: + enabled: true + certificate_authorities: ["my-ca.pem"] + certificate: "filebeat-cert.pem" + key: "filebeat-key.pem" + client_authentication: "required" +``` + [float] ==== Check Point devices diff --git a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml index 4892400a8b9..9ac586c6b5c 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/config/firewall.yml @@ -1,4 +1,10 @@ -{{ if eq .input "syslog" }} +{{ if .ssl }} + +type: tcp +host: "{{.syslog_host}}:{{.syslog_port}}" +ssl: {{ .ssl | tojson }} + +{{ else if eq .input "syslog" }} type: udp host: "{{.syslog_host}}:{{.syslog_port}}" diff --git a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml index 849c20fafe2..69301541669 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/manifest.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/manifest.yml @@ -9,6 +9,7 @@ var: default: 9001 - name: input default: syslog + - name: ssl ingest_pipeline: - ingest/pipeline.yml From a79dddc8f9dcf5bad68f4a26e67840403e9e2cf7 Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Mon, 19 Oct 2020 17:44:42 +0200 Subject: [PATCH 28/93] Fix TestDockerStart flaky test (#21681) Some changes are done to give more resilience to the test: * Wait till image pull is finished, and retry in case of failure. * Checked events are filtered by container id instead of image name, so tests are not affected by other containers that may be running in the system. * Check timeout is for all events now, instead of being reset after an event is received. * Container is removed after test is finished. 
--- .../docker/docker_integration_test.go | 20 ++++----- libbeat/tests/docker/docker.go | 41 ++++++++++++++++--- 2 files changed, 46 insertions(+), 15 deletions(-) diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index bbb2bc979bc..898f3cd254c 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -36,8 +36,6 @@ import ( // Test docker start emits an autodiscover event func TestDockerStart(t *testing.T) { - t.Skip("#20360 Flaky TestDockerStart skipped") - log := logp.NewLogger("docker") d, err := dk.NewClient() @@ -70,15 +68,17 @@ func TestDockerStart(t *testing.T) { // Start cmd := []string{"echo", "Hi!"} labels := map[string]string{"label": "foo", "label.child": "bar"} - ID, err := d.ContainerStart("busybox", cmd, labels) + ID, err := d.ContainerStart("busybox:latest", cmd, labels) if err != nil { t.Fatal(err) } - checkEvent(t, listener, true) + defer d.ContainerRemove(ID) + + checkEvent(t, listener, ID, true) // Kill d.ContainerKill(ID) - checkEvent(t, listener, false) + checkEvent(t, listener, ID, false) } func getValue(e bus.Event, key string) interface{} { @@ -89,12 +89,13 @@ func getValue(e bus.Event, key string) interface{} { return val } -func checkEvent(t *testing.T, listener bus.Listener, start bool) { +func checkEvent(t *testing.T, listener bus.Listener, id string, start bool) { + timeout := time.After(60 * time.Second) for { select { case e := <-listener.Events(): // Ignore any other container - if getValue(e, "docker.container.image") != "busybox" { + if getValue(e, "container.id") != id { continue } if start { @@ -104,7 +105,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "stop"), true) assert.Nil(t, getValue(e, "start")) } - assert.Equal(t, getValue(e, "container.image.name"), "busybox") + assert.Equal(t, getValue(e, "container.image.name"), "busybox:latest") // labels.dedot=true by default assert.Equal(t, common.MapStr{ @@ -122,8 +123,7 @@ func checkEvent(t *testing.T, listener bus.Listener, start bool) { assert.Equal(t, getValue(e, "docker.container.name"), getValue(e, "meta.container.name")) assert.Equal(t, getValue(e, "docker.container.image"), getValue(e, "meta.container.image.name")) return - - case <-time.After(10 * time.Second): + case <-timeout: t.Fatal("Timeout waiting for provider events") return } diff --git a/libbeat/tests/docker/docker.go b/libbeat/tests/docker/docker.go index 888347c5cc7..8bb5efadbfa 100644 --- a/libbeat/tests/docker/docker.go +++ b/libbeat/tests/docker/docker.go @@ -19,6 +19,8 @@ package docker import ( "context" + "io" + "io/ioutil" "github.com/pkg/errors" @@ -42,13 +44,12 @@ func NewClient() (Client, error) { // ContainerStart pulls and starts the given container func (c Client) ContainerStart(image string, cmd []string, labels map[string]string) (string, error) { - ctx := context.Background() - respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + err := c.imagePull(image) if err != nil { - return "", errors.Wrapf(err, "pullling image %s", image) + return "", err } - defer respBody.Close() + ctx := context.Background() resp, err := c.cli.ContainerCreate(ctx, &container.Config{ Image: image, Cmd: cmd, @@ -65,6 +66,36 @@ func (c Client) ContainerStart(image string, cmd []string, labels map[string]str return resp.ID, nil } +// imagePull pulls an image +func (c Client) 
imagePull(image string) (err error) { + ctx := context.Background() + _, _, err = c.cli.ImageInspectWithRaw(ctx, image) + if err == nil { + // Image already available, do nothing + return nil + } + for retry := 0; retry < 3; retry++ { + err = func() error { + respBody, err := c.cli.ImagePull(ctx, image, types.ImagePullOptions{}) + if err != nil { + return errors.Wrapf(err, "pullling image %s", image) + } + defer respBody.Close() + + // Read all the response, to be sure that the pull has finished before returning. + _, err = io.Copy(ioutil.Discard, respBody) + if err != nil { + return errors.Wrapf(err, "reading response for image %s", image) + } + return nil + }() + if err == nil { + break + } + } + return +} + // ContainerWait waits for a container to finish func (c Client) ContainerWait(ID string) error { ctx := context.Background() @@ -89,7 +120,7 @@ func (c Client) ContainerKill(ID string) error { return c.cli.ContainerKill(ctx, ID, "KILL") } -// ContainerRemove kills and removed the given container +// ContainerRemove kills and removes the given container func (c Client) ContainerRemove(ID string) error { ctx := context.Background() return c.cli.ContainerRemove(ctx, ID, types.ContainerRemoveOptions{ From 6955665227cb13504c52a01d3cbccd0a28c7ed9e Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Mon, 19 Oct 2020 13:24:56 -0700 Subject: [PATCH 29/93] fix diskio and memory bugs under windows (#21992) --- metricbeat/module/system/diskio/diskio.go | 3 ++- metricbeat/module/system/memory/memory.go | 6 +++--- metricbeat/module/system/process/process.go | 4 +++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go index 1359180cff6..4a7e2e2b5fe 100644 --- a/metricbeat/module/system/diskio/diskio.go +++ b/metricbeat/module/system/diskio/diskio.go @@ -21,6 +21,7 @@ package diskio import ( "fmt" + "runtime" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/metric/system/diskio" @@ -114,7 +115,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { diskWriteBytes += counters.WriteBytes //Add linux-only data if agent is off as not to make breaking changes. - if !m.IsAgent { + if !m.IsAgent && runtime.GOOS == "linux" { result, err := m.statistics.CalcIOStatistics(counters) if err != nil { return errors.Wrap(err, "error calculating iostat") diff --git a/metricbeat/module/system/memory/memory.go b/metricbeat/module/system/memory/memory.go index 27e76b85489..26c6bea1867 100644 --- a/metricbeat/module/system/memory/memory.go +++ b/metricbeat/module/system/memory/memory.go @@ -42,7 +42,7 @@ func init() { // MetricSet for fetching system memory metrics. type MetricSet struct { mb.BaseMetricSet - IsFleet bool + IsAgent bool } // New is a mb.MetricSetFactory that returns a memory.MetricSet. @@ -53,7 +53,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("unexpected module type") } - return &MetricSet{BaseMetricSet: base, IsFleet: systemModule.IsAgent}, nil + return &MetricSet{BaseMetricSet: base, IsAgent: systemModule.IsAgent}, nil } // Fetch fetches memory metrics from the OS. 
@@ -117,7 +117,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } // for backwards compatibility, only report if we're not in fleet mode - if !m.IsFleet { + if !m.IsAgent { err := linux.FetchLinuxMemStats(memory) if err != nil { return errors.Wrap(err, "error getting page stats") diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index 804c62d06d6..c99ffaa1123 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -156,10 +156,12 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { // There's some more Windows memory quirks we need to deal with. // "rss" is a linux concept, but "wss" is a direct match on Windows. // "share" is also unavailable on Windows. + if runtime.GOOS == "windows" { + proc.Delete("memory.share") + } if m.IsAgent { if runtime.GOOS == "windows" { - proc.Delete("memory.share") if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { proc.Put("memory.wss", setSize) } From fa50a44556a2c7d7f78855f98e12bf13848a0f9a Mon Sep 17 00:00:00 2001 From: Mariana Dima Date: Tue, 20 Oct 2020 09:28:30 +0200 Subject: [PATCH 30/93] Azure storage metricset values not inside the metricset name (#21845) * mofidy doc * fix * changelog --- CHANGELOG.next.asciidoc | 1 + x-pack/metricbeat/module/azure/storage/storage.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fd297059639..6ead76346b3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -375,6 +375,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix azure storage event format. 
{pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] *Packetbeat* diff --git a/x-pack/metricbeat/module/azure/storage/storage.go b/x-pack/metricbeat/module/azure/storage/storage.go index 9f54871b319..4178b911d11 100644 --- a/x-pack/metricbeat/module/azure/storage/storage.go +++ b/x-pack/metricbeat/module/azure/storage/storage.go @@ -41,6 +41,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, err } + // set default resource type to indicate this is not the generic monitor metricset + ms.Client.Config.DefaultResourceType = defaultStorageAccountNamespace // if no options are entered we will retrieve all the vm's from the entire subscription if len(ms.Client.Config.Resources) == 0 { ms.Client.Config.Resources = []azure.ResourceConfig{ From e7fd212d8c4974927a295002b399a09401a629f7 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 20 Oct 2020 11:28:32 +0200 Subject: [PATCH 31/93] [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) [Ingest Manager] Always try snapshot repo for agent upgrade (#21951) --- x-pack/elastic-agent/pkg/agent/application/stream.go | 4 ++-- .../pkg/agent/application/upgrade/step_download.go | 4 ++-- x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go | 3 --- .../pkg/artifact/download/localremote/downloader.go | 4 ++-- .../pkg/artifact/download/localremote/verifier.go | 4 ++-- 5 files changed, 8 insertions(+), 11 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 41999fcb832..784038e77ab 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -56,9 +56,9 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { - fetcher := downloader.NewDownloader(log, config.DownloadConfig) + fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) if err != nil { return nil, errors.New(err, "initiating verifier") } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go index 3aea96da0ab..0294308ff3a 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go @@ -27,12 +27,12 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp) + verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp, true) if err != nil { return "", errors.New(err, "initiating verifier") } - fetcher := downloader.NewDownloader(u.log, &settings) + fetcher := downloader.NewDownloader(u.log, &settings, true) path, err := fetcher.Download(ctx, agentName, agentArtifactName, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") diff --git 
a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go index 1a21bc154a1..d7e69fc3972 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/upgrade.go @@ -183,9 +183,6 @@ func (u *Upgrader) Ack(ctx context.Context) error { } func (u *Upgrader) sourceURI(version, retrievedURI string) (string, error) { - if strings.HasSuffix(version, "-SNAPSHOT") && retrievedURI == "" { - return "", errors.New("snapshot upgrade requires source uri", errors.TypeConfig) - } if retrievedURI != "" { return retrievedURI, nil } diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go index 6448af25aca..ba82195ffbd 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go @@ -17,12 +17,12 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(log *logger.Logger, config *artifact.Config) download.Downloader { +func NewDownloader(log *logger.Logger, config *artifact.Config, forceSnapshot bool) download.Downloader { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapDownloader, err := snapshot.NewDownloader(config) if err != nil { log.Error(err) diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go index 4f33cbbdb8e..30517d12d3d 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go @@ -17,7 +17,7 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, forceSnapshot bool) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) fsVer, err := fs.NewVerifier(config, allowEmptyPgp, pgp) @@ -27,7 +27,7 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool verifiers = append(verifiers, fsVer) // try snapshot repo before official - if release.Snapshot() { + if release.Snapshot() || forceSnapshot { snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp) if err != nil { log.Error(err) From 0d5ef7b3ccdbd4c83d2a2df018b654164383ecee Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 20 Oct 2020 12:49:18 +0300 Subject: [PATCH 32/93] [Kubernetes] Remove redundant dockersock volume mount (#22009) --- CHANGELOG.next.asciidoc | 1 + deploy/kubernetes/metricbeat-kubernetes.yaml | 5 ----- deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml | 5 ----- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6ead76346b3..fa8d1fc2791 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -377,6 +377,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Report the correct windows events for system/filesystem {pull}21758[21758] - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] *Packetbeat* diff --git a/deploy/kubernetes/metricbeat-kubernetes.yaml b/deploy/kubernetes/metricbeat-kubernetes.yaml index 32d1010f4d0..db1eb25d7a5 100644 --- a/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -189,8 +189,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -204,9 +202,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 diff --git a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index 0197fe136b6..34bcf536068 100644 --- a/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -64,8 +64,6 @@ spec: - name: modules mountPath: /usr/share/metricbeat/modules.d readOnly: true - - name: dockersock - mountPath: /var/run/docker.sock - name: proc mountPath: /hostfs/proc readOnly: true @@ -79,9 +77,6 @@ spec: - name: cgroup hostPath: path: /sys/fs/cgroup - - name: dockersock - hostPath: - path: /var/run/docker.sock - name: config configMap: defaultMode: 0640 From 0bb45f25cc4de6849ec419f2cecfca2aaa193cf7 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 20 Oct 2020 13:57:25 +0100 Subject: [PATCH 33/93] [beats-tester][packaging] store packages in another location (#21903) --- .ci/beats-tester.groovy | 3 +++ .ci/packaging.groovy | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.ci/beats-tester.groovy b/.ci/beats-tester.groovy index eb1357700b6..91781a98d31 100644 --- a/.ci/beats-tester.groovy +++ b/.ci/beats-tester.groovy @@ -54,6 +54,7 @@ pipeline { options { 
skipDefaultCheckout() } when { branch 'master' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -61,6 +62,7 @@ pipeline { options { skipDefaultCheckout() } when { branch '*.x' } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } @@ -84,6 +86,7 @@ pipeline { } } steps { + // TODO: to use the git commit that triggered the upstream build runBeatsTesterJob(version: "${env.VERSION}-SNAPSHOT") } } diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 4145ee6bdd1..8936de2fb3e 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -327,7 +327,16 @@ def publishPackages(baseDir){ bucketUri = "gs://${JOB_GCS_BUCKET}/pull-requests/pr-${env.CHANGE_ID}" } def beatsFolderName = getBeatsName(baseDir) - googleStorageUpload(bucket: "${bucketUri}/${beatsFolderName}", + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) + + // Copy those files to another location with the sha commit to test them + // aftewords. + bucketUri = "gs://${JOB_GCS_BUCKET}/commits/${env.GIT_BASE_COMMIT}" + uploadPackages("${bucketUri}/${beatsFolderName}", baseDir) +} + +def uploadPackages(bucketUri, baseDir){ + googleStorageUpload(bucket: bucketUri, credentialsId: "${JOB_GCS_CREDENTIALS}", pathPrefix: "${baseDir}/build/distributions/", pattern: "${baseDir}/build/distributions/**/*", From 38add00bffb8565c1b5e0cfe17776934af3ef525 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 20 Oct 2020 16:49:11 +0300 Subject: [PATCH 34/93] Fix Istio docs (#22019) Signed-off-by: chrismark --- metricbeat/docs/modules/istio.asciidoc | 4 ++-- x-pack/metricbeat/module/istio/_meta/docs.asciidoc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/metricbeat/docs/modules/istio.asciidoc b/metricbeat/docs/modules/istio.asciidoc index c80e2d84c09..fee401e1983 100644 --- a/metricbeat/docs/modules/istio.asciidoc +++ b/metricbeat/docs/modules/istio.asciidoc @@ -10,8 +10,8 @@ beta[] This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. -where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used. diff --git a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc index f3b1825a9b1..39eb93b4095 100644 --- a/x-pack/metricbeat/module/istio/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/istio/_meta/docs.asciidoc @@ -1,7 +1,7 @@ This is the Istio module. This module is compatible with versions before `1.5` of Istio where microservices architecture is used. If using -versions priot to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used. 
-where the Istio module collects metrics from the +versions prior to `1.5` then `mesh`, `mixer`, `pilot`, `galley`, `citadel` metricsets should be used where the Istio +module collects metrics from the Istio https://istio.io/v1.4/docs/tasks/observability/metrics/querying-metrics/#about-the-prometheus-add-on[prometheus exporters endpoints]. For versions after `1.5`, `istiod` and `proxy` metricsets can be used. From 37dc557b2c04ab4c87eceea5271b6733e23d356e Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Tue, 20 Oct 2020 09:16:44 -0500 Subject: [PATCH 35/93] dynamically find librpm (#21936) - use elf header of rpm binary to find version of librpm - use librpm.so as fallback, provided by rpm-devel Closes #19287 --- .../module/system/package/rpm_linux.go | 63 ++++++++++++++----- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index fa6fc66f4cd..6e5df7e0c6e 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -10,9 +10,12 @@ import ( "errors" "fmt" "runtime" + "strings" "time" "unsafe" + "debug/elf" + "github.com/coreos/pkg/dlopen" ) @@ -204,29 +207,57 @@ func (lib *librpm) close() error { return nil } -func openLibrpm() (*librpm, error) { - var librpmNames = []string{ - "librpm.so", // with rpm-devel installed - "librpm.so.9", // Fedora 31/32 - "librpm.so.8", // Fedora 29/30 - "librpm.so.3", // CentOS 7 - "librpm.so.1", // CentOS 6 - - // Following for completeness, but not explicitly tested - "librpm.so.10", - "librpm.so.7", - "librpm.so.6", - "librpm.so.5", - "librpm.so.4", - "librpm.so.2", +// getLibrpmNames determines the versions of librpm.so that are +// installed on a system. rpm-devel rpm installs the librpm.so +// symbolic link to the correct version of librpm, but that isn't a +// required package. rpm will install librpm.so.X, where X is the +// version number. getLibrpmNames looks at the elf header for the rpm +// binary to determine what version of librpm.so it is linked against. 
+func getLibrpmNames() []string { + var rpmPaths = []string{ + "/usr/bin/rpm", + "/bin/rpm", + } + var libNames = []string{ + "librpm.so", } + var rpmElf *elf.File + var err error + + for _, path := range rpmPaths { + rpmElf, err = elf.Open(path) + if err == nil { + break + } + } + if err != nil { + return libNames + } + + impLibs, err := rpmElf.ImportedLibraries() + if err != nil { + return libNames + } + + for _, lib := range impLibs { + if strings.Contains(lib, "librpm.so") { + libNames = append(libNames, lib) + } + } + + return libNames +} + +func openLibrpm() (*librpm, error) { var librpm librpm var err error + librpmNames := getLibrpmNames() + librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, err + return nil, fmt.Errorf("Couldn't open %v", librpmNames) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") From 283641ec6ad66e09c2bf04be85b062764c6ce711 Mon Sep 17 00:00:00 2001 From: EamonnTP Date: Tue, 20 Oct 2020 16:08:23 +0100 Subject: [PATCH 36/93] Update links (#22012) --- libbeat/docs/getting-started.asciidoc | 4 ++-- libbeat/docs/howto/load-dashboards.asciidoc | 4 ++-- libbeat/docs/overview.asciidoc | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/libbeat/docs/getting-started.asciidoc b/libbeat/docs/getting-started.asciidoc index b1a85fddb46..5291f755e5b 100644 --- a/libbeat/docs/getting-started.asciidoc +++ b/libbeat/docs/getting-started.asciidoc @@ -13,5 +13,5 @@ Each Beat is a separately installable product. To learn how to get started, see: * {winlogbeat-ref}/winlogbeat-installation-configuration.html[Winlogbeat] If you're planning to use the {metrics-app} or the {logs-app} in {kib}, -also see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. \ No newline at end of file diff --git a/libbeat/docs/howto/load-dashboards.asciidoc b/libbeat/docs/howto/load-dashboards.asciidoc index 781789d3ae4..c03b512d636 100644 --- a/libbeat/docs/howto/load-dashboards.asciidoc +++ b/libbeat/docs/howto/load-dashboards.asciidoc @@ -15,8 +15,8 @@ ifdef::has_solutions[] TIP: For deeper observability into your infrastructure, you can use the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. endif::has_solutions[] {beatname_uc} comes packaged with example Kibana dashboards, visualizations, diff --git a/libbeat/docs/overview.asciidoc b/libbeat/docs/overview.asciidoc index 11dc10f2b8f..bdc46aaaf28 100644 --- a/libbeat/docs/overview.asciidoc +++ b/libbeat/docs/overview.asciidoc @@ -28,8 +28,8 @@ To get started, see <>. Want to get up and running quickly with infrastructure metrics monitoring and centralized log analytics? Try out the {metrics-app} and the {logs-app} in {kib}. -For more details, see the {metrics-guide}[Metrics Monitoring Guide] -and the {logs-guide}[Logs Monitoring Guide]. +For more details, see {observability-guide}/analyze-metrics.html[Analyze metrics] +and {observability-guide}/monitor-logs.html[Monitor logs]. [float] === Need to capture other kinds of data? 
From e0d06541847dd27b04d2ac328dadf73ac7f883d3 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Tue, 20 Oct 2020 11:52:56 -0400 Subject: [PATCH 37/93] Document auditbeat system process module config (#21766) The documentation for the system/process dataset was missing information on the configuration options. Closes #16869 --- x-pack/auditbeat/docs/modules/system.asciidoc | 2 +- .../module/system/_meta/docs.asciidoc | 2 +- .../module/system/process/_meta/docs.asciidoc | 22 ++++++++++++++++++- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/x-pack/auditbeat/docs/modules/system.asciidoc b/x-pack/auditbeat/docs/modules/system.asciidoc index 15eafc34116..e850c065197 100644 --- a/x-pack/auditbeat/docs/modules/system.asciidoc +++ b/x-pack/auditbeat/docs/modules/system.asciidoc @@ -97,7 +97,7 @@ This module also supports the <> described later. -*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/_meta/docs.asciidoc index 083435d94ae..a2a36987c51 100644 --- a/x-pack/auditbeat/module/system/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/_meta/docs.asciidoc @@ -90,7 +90,7 @@ This module also supports the <> described later. -*`state.period`*:: The frequency at which the datasets send full state information. +*`state.period`*:: The interval at which the datasets send full state information. This option can be overridden per dataset using `{dataset}.state.period`. *`user.detect_password_changes`*:: If the `user` dataset is configured and diff --git a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc index e1d930e1fbf..e84f7246933 100644 --- a/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc +++ b/x-pack/auditbeat/module/system/process/_meta/docs.asciidoc @@ -2,10 +2,30 @@ beta[] -This is the `process` dataset of the system module. +This is the `process` dataset of the system module. It generates an event when +a process starts and stops. It is implemented for Linux, macOS (Darwin), and Windows. +[float] +=== Configuration options + +*`process.state.period`*:: The interval at which the dataset sends full state +information. If set this will take precedence over `state.period`. The default +value is `12h`. + +*`process.hash.max_file_size`*:: The maximum size of a file in bytes for which +{beatname_uc} will compute hashes. Files larger than this size will not be +hashed. The default value is 100 MiB. For convenience units can be specified as +a suffix to the value. The supported units are `b` (default), `kib`, `kb`, +`mib`, `mb`, `gib`, `gb`, `tib`, `tb`, `pib`, `pb`, `eib`, and `eb`. + +*`process.hash.hash_types`*:: A list of hash types to compute when the file +changes. The supported hash types are `blake2b_256`, `blake2b_384`, +`blake2b_512`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, +`sha512_224`, `sha512_256`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, and +`xxh64`. The default value is `sha1`. 
+ [float] ==== Example dashboard From 610e998c121e9453363a0f429c5f8d197eb1350d Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 20 Oct 2020 12:29:00 -0400 Subject: [PATCH 38/93] [Elastic Agent] Fix missing elastic_agent event data (#21994) * Fix fields. * Remove from monitoring decorator. * Add changelog. * Fix tests. * Fix tests. * Fix import. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../pkg/agent/application/local_mode.go | 2 +- .../pkg/agent/application/managed_mode.go | 2 +- .../agent/application/monitoring_decorator.go | 1 - .../pkg/agent/application/stream.go | 8 +++-- .../pkg/agent/operation/common_test.go | 4 ++- .../pkg/agent/operation/monitoring.go | 30 +++++++++++++++++++ .../pkg/agent/operation/monitoring_test.go | 4 ++- .../pkg/agent/operation/operator.go | 4 +++ .../testdata/enabled_output_true-filebeat.yml | 8 ++--- .../testdata/enabled_true-filebeat.yml | 8 ++--- .../testdata/single_config-filebeat.yml | 16 +++++----- .../testdata/single_config-metricbeat.yml | 24 +++++++-------- .../pkg/agent/transpiler/rules.go | 8 ++--- .../pkg/agent/transpiler/rules_test.go | 16 +++++----- 15 files changed, 88 insertions(+), 48 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index fa0198a6628..3882ba19712 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -20,6 +20,7 @@ - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] +- Fix missing elastic_agent event data {pull}21994[21994] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/application/local_mode.go b/x-pack/elastic-agent/pkg/agent/application/local_mode.go index b58e260cab6..f0c4153f474 100644 --- a/x-pack/elastic-agent/pkg/agent/application/local_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/local_mode.go @@ -100,7 +100,7 @@ func newLocal( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(localApplication.bgContext, cfg.Settings, localApplication.srv, reporter, monitor)) + router, err := newRouter(log, streamFactory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go index e38685741c3..fa31215f75d 100644 --- a/x-pack/elastic-agent/pkg/agent/application/managed_mode.go +++ b/x-pack/elastic-agent/pkg/agent/application/managed_mode.go @@ -154,7 +154,7 @@ func newManaged( return nil, errors.New(err, "failed to initialize monitoring") } - router, err := newRouter(log, streamFactory(managedApplication.bgContext, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) + router, err := newRouter(log, streamFactory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor)) if err != nil { return nil, errors.New(err, "fail to initialize pipeline router") } diff --git a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go index 3fc49ef17d3..920b1a4b5bf 100644 --- a/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go +++ 
b/x-pack/elastic-agent/pkg/agent/application/monitoring_decorator.go @@ -94,7 +94,6 @@ func getMonitoringRule(outputName string) *transpiler.RuleList { return transpiler.NewRuleList( transpiler.Copy(monitoringOutputSelector, outputKey), transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), elasticsearchKey), - transpiler.InjectAgentInfo(), transpiler.Filter(monitoringKey, programsKey, outputKey), ) } diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 784038e77ab..2d372ef4387 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -7,6 +7,7 @@ package application import ( "context" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -40,10 +41,10 @@ func (b *operatorStream) Shutdown() { b.configHandler.Shutdown() } -func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { +func streamFactory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) func(*logger.Logger, routingKey) (stream, error) { return func(log *logger.Logger, id routingKey) (stream, error) { // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, id, cfg, srv, r, m) + operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m) if err != nil { return nil, err } @@ -55,7 +56,7 @@ func streamFactory(ctx context.Context, cfg *configuration.SettingsConfig, srv * } } -func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { +func newOperator(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) allowEmptyPgp, pgp := release.PGP() verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) @@ -81,6 +82,7 @@ func newOperator(ctx context.Context, log *logger.Logger, id routingKey, config return operation.NewOperator( ctx, log, + agentInfo, id, config, fetcher, diff --git a/x-pack/elastic-agent/pkg/agent/operation/common_test.go b/x-pack/elastic-agent/pkg/agent/operation/common_test.go index e9d40bece87..ea16cfe77b8 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/common_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/common_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -48,6 +49,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a } l := getLogger() + agentInfo, _ := 
info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -67,7 +69,7 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a t.Fatal(err) } - operator, err := NewOperator(context.Background(), l, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) + operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor()) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go index 74d542d58e9..1959cd52818 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring.go @@ -206,6 +206,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }, } @@ -240,6 +250,16 @@ func (o *Operator) getMonitoringFilebeatConfig(output interface{}) (map[string]i }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } @@ -290,6 +310,16 @@ func (o *Operator) getMonitoringMetricbeatConfig(output interface{}) (map[string }, }, }, + { + "add_fields": map[string]interface{}{ + "target": "elastic_agent", + "fields": map[string]interface{}{ + "id": o.agentInfo.AgentID(), + "version": o.agentInfo.Version(), + "snapshot": o.agentInfo.Snapshot(), + }, + }, + }, }, }) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go index eef904096f7..3ca6a5f6b14 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go +++ b/x-pack/elastic-agent/pkg/agent/operation/monitoring_test.go @@ -11,6 +11,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/stateresolver" @@ -112,6 +113,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } l := getLogger() + agentInfo, _ := info.NewAgentInfo() fetcher := &DummyDownloader{} verifier := &DummyVerifier{} @@ -128,7 +130,7 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M } ctx := context.Background() - operator, err := NewOperator(ctx, l, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) + operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m) if err != nil { t.Fatal(err) } diff --git a/x-pack/elastic-agent/pkg/agent/operation/operator.go b/x-pack/elastic-agent/pkg/agent/operation/operator.go index b4938278821..1a39e73500e 100644 --- a/x-pack/elastic-agent/pkg/agent/operation/operator.go +++ b/x-pack/elastic-agent/pkg/agent/operation/operator.go @@ -12,6 +12,7 @@ import ( "sync" "time" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/info" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configrequest" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" @@ -43,6 +44,7 @@ type Operator struct { bgContext context.Context pipelineID string logger *logger.Logger + agentInfo *info.AgentInfo config *configuration.SettingsConfig handlers map[string]handleFunc stateResolver *stateresolver.StateResolver @@ -66,6 +68,7 @@ type Operator struct { func NewOperator( ctx context.Context, logger *logger.Logger, + agentInfo *info.AgentInfo, pipelineID string, config *configuration.SettingsConfig, fetcher download.Downloader, @@ -85,6 +88,7 @@ func NewOperator( config: config, pipelineID: pipelineID, logger: logger, + agentInfo: agentInfo, downloader: fetcher, verifier: verifier, installer: installer, diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml index 38b251d95dc..82a47adc999 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_output_true-filebeat.yml @@ -17,11 +17,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: enabled: true diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml index 6e768db6aa4..1406a2dff65 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/enabled_true-filebeat.yml @@ -18,11 +18,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml index 01ee955e4ec..524d6451f28 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-filebeat.yml @@ -19,11 +19,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - type: log paths: - /var/log/hello3.log @@ -43,11 +43,11 @@ filebeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: diff --git a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml index d09e80accf1..2889e7605eb 100644 --- a/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml +++ b/x-pack/elastic-agent/pkg/agent/program/testdata/single_config-metricbeat.yml @@ -16,11 +16,11 @@ 
metricbeat: fields: dataset: docker.status - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: docker metricsets: [info] index: metrics-generic-default @@ -37,11 +37,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false - module: apache metricsets: [info] index: metrics-generic-testing @@ -61,11 +61,11 @@ metricbeat: fields: dataset: generic - add_fields: - target: "elastic" + target: "elastic_agent" fields: - agent.id: agent-id - agent.version: 8.0.0 - agent.snapshot: false + id: agent-id + version: 8.0.0 + snapshot: false output: elasticsearch: hosts: [127.0.0.1:9200, 127.0.0.1:9300] diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go index 29ff1786d1e..42acd53d21a 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules.go @@ -715,11 +715,11 @@ func (r *InjectAgentInfoRule) Apply(agentInfo AgentInfo, ast *AST) error { // elastic.agent processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic"}}) + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic_agent"}}) processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "agent.id", value: &StrVal{value: agentInfo.AgentID()}}, - &Key{name: "agent.version", value: &StrVal{value: agentInfo.Version()}}, - &Key{name: "agent.snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, + &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, + &Key{name: "version", value: &StrVal{value: agentInfo.Version()}}, + &Key{name: "snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, }}}) addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) diff --git a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go index d92ba0de985..0fb59107844 100644 --- a/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go +++ b/x-pack/elastic-agent/pkg/agent/transpiler/rules_test.go @@ -184,11 +184,11 @@ inputs: type: file processors: - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 - name: With processors type: file processors: @@ -197,11 +197,11 @@ inputs: fields: data: more - add_fields: - target: elastic + target: elastic_agent fields: - agent.id: agent-id - agent.snapshot: false - agent.version: 8.0.0 + id: agent-id + snapshot: false + version: 8.0.0 `, rule: &RuleList{ Rules: []Rule{ From a10dca7959a5c09391e853d6e8d3e45bbee0b10f Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Tue, 20 Oct 2020 10:32:11 -0600 Subject: [PATCH 39/93] [Filebeat] Add max_number_of_messages config parameter for S3 input (#21993) --- CHANGELOG.next.asciidoc | 1 + .../docs/inputs/input-aws-s3.asciidoc | 89 ++++++++++--------- x-pack/filebeat/input/s3/collector.go | 18 ++-- x-pack/filebeat/input/s3/config.go | 22 +++-- x-pack/filebeat/input/s3/input.go | 2 +- 5 files 
changed, 68 insertions(+), 64 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index fa8d1fc2791..f2750175969 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -634,6 +634,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Adding support for Microsoft 365 Defender (Microsoft Threat Protection) {pull}21446[21446] - Adding support for FIPS in s3 input {pull}21446[21446] - Add SSL option to checkpoint module {pull}19560[19560] +- Add max_number_of_messages config into s3 input. {pull}21993[21993] *Heartbeat* diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index 5cbe4685cb8..3ea37b3c754 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -38,24 +38,32 @@ The `s3` input supports the following configuration options plus the <<{beatname_lc}-input-{type}-common-options>> described later. [float] -==== `queue_url` - -URL of the AWS SQS queue that messages will be received from. Required. - -[float] -==== `fips_enabled` - -Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. +==== `api_timeout` -[float] -==== `visibility_timeout` +The maximum duration of the AWS API call. If it exceeds the timeout, the AWS API +call will be interrupted. +The default AWS API call timeout for a message is 120 seconds. The minimum +is 0 seconds. The maximum is half of the visibility timeout value. -The duration that the received messages are hidden from subsequent -retrieve requests after being retrieved by a ReceiveMessage request. -This value needs to be a lot bigger than {beatname_uc} collection frequency so -if it took too long to read the s3 log, this sqs message will not be reprocessed. -The default visibility timeout for a message is 300 seconds. The minimum -is 0 seconds. The maximum is 12 hours. +["source","json"] +---- +{ + "Records": [ + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:51:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", + }, + { + "eventVersion": "1.07", + "eventTime": "2019-11-14T00:52:00Z", + "awsRegion": "us-east-1", + "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", + } + ] +} +---- [float] ==== `expand_event_list_from_field` @@ -93,40 +101,33 @@ file_selectors: - regex: '^AWSLogs/\d+/CloudTrail/' expand_event_list_from_field: 'Records' - regex: '^AWSLogs/\d+/CloudTrail-Digest' -``` ---- +[float] +==== `fips_enabled` + +Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. [float] -==== `api_timeout` +==== `max_number_of_messages` +The maximum number of messages to return. Amazon SQS never returns more messages +than this value (however, fewer messages might be returned). +Valid values: 1 to 10. Default: 5. -The maximum duration of AWS API can take. If it exceeds the timeout, AWS API -will be interrupted. -The default AWS API timeout for a message is 120 seconds. The minimum -is 0 seconds. The maximum is half of the visibility timeout value. +[float] +==== `queue_url` -["source","json"] ----- -{ - "Records": [ - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:51:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLE8-9621-4d00-b913-beca2EXAMPLE", - ... 
- }, - { - "eventVersion": "1.07", - "eventTime": "2019-11-14T00:52:00Z", - "awsRegion": "us-east-1", - "eventID": "EXAMPLEc-28be-486c-8928-49ce6EXAMPLE", - ... - } - ] -} -``` ----- +URL of the AWS SQS queue that messages will be received from. Required. + +[float] +==== `visibility_timeout` + +The duration that the received messages are hidden from subsequent +retrieve requests after being retrieved by a ReceiveMessage request. +This value needs to be a lot bigger than {beatname_uc} collection frequency so +if it took too long to read the s3 log, this sqs message will not be reprocessed. +The default visibility timeout for a message is 300 seconds. The minimum +is 0 seconds. The maximum is 12 hours. [float] ==== `aws credentials` diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index 1b890513284..c3d3114c723 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -82,17 +82,11 @@ type s3Context struct { errC chan error } -var ( - // The maximum number of messages to return. Amazon SQS never returns more messages - // than this value (however, fewer messages might be returned). - maxNumberOfMessage uint8 = 10 - - // The duration (in seconds) for which the call waits for a message to arrive - // in the queue before returning. If a message is available, the call returns - // sooner than WaitTimeSeconds. If no messages are available and the wait time - // expires, the call returns successfully with an empty list of messages. - waitTimeSecond uint8 = 10 -) +// The duration (in seconds) for which the call waits for a message to arrive +// in the queue before returning. If a message is available, the call returns +// sooner than WaitTimeSeconds. If no messages are available and the wait time +// expires, the call returns successfully with an empty list of messages. 
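// Note: only the long-poll wait time stays as a package-level value here; the
// former maxNumberOfMessage constant is dropped by this change and replaced by
// the configurable max_number_of_messages setting, read from
// c.config.MaxNumberOfMessages later in this hunk.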
+var waitTimeSecond uint8 = 10 func (c *s3Collector) run() { defer c.logger.Info("s3 input worker has stopped.") @@ -205,7 +199,7 @@ func (c *s3Collector) receiveMessage(svcSQS sqsiface.ClientAPI, visibilityTimeou &sqs.ReceiveMessageInput{ QueueUrl: &c.config.QueueURL, MessageAttributeNames: []string{"All"}, - MaxNumberOfMessages: awssdk.Int64(int64(maxNumberOfMessage)), + MaxNumberOfMessages: awssdk.Int64(int64(c.config.MaxNumberOfMessages)), VisibilityTimeout: &visibilityTimeout, WaitTimeSeconds: awssdk.Int64(int64(waitTimeSecond)), }) diff --git a/x-pack/filebeat/input/s3/config.go b/x-pack/filebeat/input/s3/config.go index cc3c5318289..6dc0746ce5f 100644 --- a/x-pack/filebeat/input/s3/config.go +++ b/x-pack/filebeat/input/s3/config.go @@ -13,13 +13,14 @@ import ( ) type config struct { + APITimeout time.Duration `config:"api_timeout"` + ExpandEventListFromField string `config:"expand_event_list_from_field"` + FileSelectors []FileSelectorCfg `config:"file_selectors"` + FipsEnabled bool `config:"fips_enabled"` + MaxNumberOfMessages int `config:"max_number_of_messages"` QueueURL string `config:"queue_url" validate:"nonzero,required"` VisibilityTimeout time.Duration `config:"visibility_timeout"` - FipsEnabled bool `config:"fips_enabled"` AwsConfig awscommon.ConfigAWS `config:",inline"` - ExpandEventListFromField string `config:"expand_event_list_from_field"` - APITimeout time.Duration `config:"api_timeout"` - FileSelectors []FileSelectorCfg `config:"file_selectors"` } // FileSelectorCfg defines type and configuration of FileSelectors @@ -31,9 +32,10 @@ type FileSelectorCfg struct { func defaultConfig() config { return config{ - VisibilityTimeout: 300 * time.Second, - APITimeout: 120 * time.Second, - FipsEnabled: false, + APITimeout: 120 * time.Second, + FipsEnabled: false, + MaxNumberOfMessages: 5, + VisibilityTimeout: 300 * time.Second, } } @@ -42,10 +44,12 @@ func (c *config) Validate() error { return fmt.Errorf("visibility timeout %v is not within the "+ "required range 0s to 12h", c.VisibilityTimeout) } + if c.APITimeout < 0 || c.APITimeout > c.VisibilityTimeout/2 { return fmt.Errorf("api timeout %v needs to be larger than"+ " 0s and smaller than half of the visibility timeout", c.APITimeout) } + for i := range c.FileSelectors { r, err := regexp.Compile(c.FileSelectors[i].RegexString) if err != nil { @@ -53,5 +57,9 @@ func (c *config) Validate() error { } c.FileSelectors[i].Regex = r } + + if c.MaxNumberOfMessages > 10 || c.MaxNumberOfMessages < 1 { + return fmt.Errorf(" max_number_of_messages %v needs to be between 1 and 10", c.MaxNumberOfMessages) + } return nil } diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index d76e5b8b728..36f160d759e 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -106,7 +106,7 @@ func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3C } log.Debug("s3 service name = ", s3Servicename) - + log.Debug("s3 input config max_number_of_messages = ", in.config.MaxNumberOfMessages) return &s3Collector{ cancellation: ctxtool.FromCanceller(ctx.Cancelation), logger: log, From 5935293e6efa2bb3900fe31d58111f7e557e795a Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Wed, 21 Oct 2020 02:22:16 +0200 Subject: [PATCH 40/93] [DOC] Add firewall as possible troubleshooting issue (#21743) * [DOC] Add firewall as possible troubleshooting issue In case a firewall closes long persistent connections between Beats & Logstash, errors such as `write tcp ... 
write: connection reset by peer` will be reported by a given Beat. This documentation page should be useful to identify this kind of issues. * Update shared-faq.asciidoc Amend * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Update libbeat/docs/shared-faq.asciidoc Co-authored-by: DeDe Morton * Make title more descriptive Co-authored-by: Luca Belluccini Co-authored-by: DeDe Morton --- libbeat/docs/shared-faq.asciidoc | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/libbeat/docs/shared-faq.asciidoc b/libbeat/docs/shared-faq.asciidoc index 9aa8c3442c1..d6c48b73aa9 100644 --- a/libbeat/docs/shared-faq.asciidoc +++ b/libbeat/docs/shared-faq.asciidoc @@ -54,6 +54,27 @@ connect to the Lumberjack input plugin. To learn how to install and update plugins, see {logstash-ref}/working-with-plugins.html[Working with plugins]. endif::[] +ifndef::no-output-logstash[] +[[publishing-ls-fails-connection-reset-by-peer]] +=== Publishing to {ls} fails with "connection reset by peer" message + +{beatname_uc} requires a persistent TCP connection to {ls}. If a firewall interferes +with the connection, you might see errors like this: + +[source,shell] +---------------------------------------------------------------------- +Failed to publish events caused by: write tcp ... write: connection reset by peer +---------------------------------------------------------------------- + + +To solve the problem: + +* make sure the firewall is not closing connections between {beatname_uc} and {ls}, or +* set the `ttl` value in the <> to a value that's +lower than the maximum time allowed by the firewall, and set `pipelining` to 0 +(pipelining cannot be enabled when `ttl` is used). +endif::[] + ifndef::no-output-logstash[] [[metadata-missing]] === @metadata is missing in {ls} From 65df4e14ebacfd71fc24564385c5662cd8261786 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Wed, 21 Oct 2020 12:33:23 +0200 Subject: [PATCH 41/93] feat: package aliases for snapshots (#21960) * feat: push aliases for docker images * feat: build alias for snapshots * fix: only update alias on snapshots Co-authored-by: Jaime Soriano Pastor * fix: wrong image name for alias * fix: reuse variable as groovy does not hide variables by scope * chore: extract common logic to a method * Revert "fix: only update alias on snapshots" This reverts commit cff2cef82cb107bfddeca5caf225a9307db72135. * Revert "feat: build alias for snapshots" This reverts commit 707e0d71556553b15388adec0c7118ff89210ac9. 
* chore: do not push aliases for PRs Co-authored-by: Jaime Soriano Pastor --- .ci/packaging.groovy | 56 ++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 8936de2fb3e..91902595a3c 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -191,10 +191,14 @@ def pushCIDockerImages(){ } } -def tagAndPush(name){ +def tagAndPush(beatName){ def libbetaVer = sh(label: 'Get libbeat version', script: 'grep defaultBeatVersion ${BASE_DIR}/libbeat/version/version.go|cut -d "=" -f 2|tr -d \\"', returnStdout: true)?.trim() + def aliasVersion = "" if("${env.SNAPSHOT}" == "true"){ + aliasVersion = libbetaVer.substring(0, libbetaVer.lastIndexOf(".")) // remove third number in version + libbetaVer += "-SNAPSHOT" + aliasVersion += "-SNAPSHOT" } def tagName = "${libbetaVer}" @@ -207,25 +211,37 @@ def tagAndPush(name){ // supported image flavours def variants = ["", "-oss", "-ubi8"] variants.each { variant -> - def oldName = "${DOCKER_REGISTRY}/beats/${name}${variant}:${libbetaVer}" - def newName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${tagName}" - def commitName = "${DOCKER_REGISTRY}/observability-ci/${name}${variant}:${env.GIT_BASE_COMMIT}" - - def iterations = 0 - retryWithSleep(retries: 3, seconds: 5, backoff: true) { - iterations++ - def status = sh(label:'Change tag and push', script: """ - docker tag ${oldName} ${newName} - docker push ${newName} - docker tag ${oldName} ${commitName} - docker push ${commitName} - """, returnStatus: true) - - if ( status > 0 && iterations < 3) { - error('tag and push failed, retry') - } else if ( status > 0 ) { - log(level: 'WARN', text: "${name} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") - } + doTagAndPush(beatName, variant, libbetaVer, tagName) + doTagAndPush(beatName, variant, libbetaVer, "${env.GIT_BASE_COMMIT}") + + if (!isPR() && aliasVersion != "") { + doTagAndPush(beatName, variant, libbetaVer, aliasVersion) + } + } +} + +/** +* @param beatName name of the Beat +* @param variant name of the variant used to build the docker image name +* @param sourceTag tag to be used as source for the docker tag command, usually under the 'beats' namespace +* @param targetTag tag to be used as target for the docker tag command, usually under the 'observability-ci' namespace +*/ +def doTagAndPush(beatName, variant, sourceTag, targetTag) { + def sourceName = "${DOCKER_REGISTRY}/beats/${beatName}${variant}:${sourceTag}" + def targetName = "${DOCKER_REGISTRY}/observability-ci/${beatName}${variant}:${targetTag}" + + def iterations = 0 + retryWithSleep(retries: 3, seconds: 5, backoff: true) { + iterations++ + def status = sh(label: "Change tag and push ${targetName}", script: """ + docker tag ${sourceName} ${targetName} + docker push ${targetName} + """, returnStatus: true) + + if ( status > 0 && iterations < 3) { + error("tag and push failed for ${beatName}, retry") + } else if ( status > 0 ) { + log(level: 'WARN', text: "${beatName} doesn't have ${variant} docker images. See https://github.com/elastic/beats/pull/21621") } } } From bb50d32ead2945b5c982e7975ab6ef6b3625860a Mon Sep 17 00:00:00 2001 From: William Deurwaarder Date: Wed, 21 Oct 2020 14:45:12 +0200 Subject: [PATCH 42/93] Prevent log input from sending duplicate messages due to file renaming (#21911) Input:Log: Reset TTL of registry state when a file is renamed. 
In some rare cases the registry state is marked for removal (TTL is set to 0) while the file is only renamed. Log detects the renaming of the file and updates the name of the file. As the file still exists it should also update the TTL of the renamed file. --- filebeat/input/log/input.go | 1 + 1 file changed, 1 insertion(+) diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go index 365da416ed3..1b203adcf5e 100644 --- a/filebeat/input/log/input.go +++ b/filebeat/input/log/input.go @@ -566,6 +566,7 @@ func (p *Input) harvestExistingFile(newState file.State, oldState file.State) { logp.Debug("input", "Updating state for renamed file: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset) // Update state because of file rotation oldState.Source = newState.Source + oldState.TTL = newState.TTL err := p.updateState(oldState) if err != nil { logp.Err("File rotation state update error: %s", err) From 374467e49016706dfdd927e04b5ea8a86cebdc66 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 21 Oct 2020 14:56:44 +0200 Subject: [PATCH 43/93] [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) [Ingest Manager] Use ML_SYSTEM to detect if agent is running as a service (#21884) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/install/svc_windows.go | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 3882ba19712..7088904a820 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -17,6 +17,7 @@ - Fix issue where inputs without processors defined would panic {pull}21628[21628] - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] +- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] diff --git a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go index 9084f3b5ea7..a60aadb5494 100644 --- a/x-pack/elastic-agent/pkg/agent/install/svc_windows.go +++ b/x-pack/elastic-agent/pkg/agent/install/svc_windows.go @@ -10,10 +10,14 @@ import ( "golang.org/x/sys/windows" ) +const ( + ML_SYSTEM_RID = 0x4000 +) + // RunningUnderSupervisor returns true when executing Agent is running under // the supervisor processes of the OS. 
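// Note on the change below: the SID is now allocated from the mandatory label
// authority with ML_SYSTEM_RID (0x4000), so the check asks whether the process
// token carries the System integrity level, which services launched by the
// Windows service manager run at, instead of looking for the generic service
// SID as before.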
func RunningUnderSupervisor() bool { - serviceSid, err := allocSid(windows.SECURITY_SERVICE_RID) + serviceSid, err := allocSid(ML_SYSTEM_RID) if err != nil { return false } @@ -40,7 +44,7 @@ func RunningUnderSupervisor() bool { func allocSid(subAuth0 uint32) (*windows.SID, error) { var sid *windows.SID - err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + err := windows.AllocateAndInitializeSid(&windows.SECURITY_MANDATORY_LABEL_AUTHORITY, 1, subAuth0, 0, 0, 0, 0, 0, 0, 0, &sid) if err != nil { return nil, err From fc007701ecc42f7c6dc0e11762029944539fe1b2 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Wed, 21 Oct 2020 11:26:19 -0700 Subject: [PATCH 44/93] Fix typo (#19585) (#22061) Co-authored-by: Byungjin Park (BJ) --- heartbeat/docs/monitors/monitor-http.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/heartbeat/docs/monitors/monitor-http.asciidoc b/heartbeat/docs/monitors/monitor-http.asciidoc index ea981ea62b7..33d29dec89a 100644 --- a/heartbeat/docs/monitors/monitor-http.asciidoc +++ b/heartbeat/docs/monitors/monitor-http.asciidoc @@ -161,7 +161,7 @@ Under `check.response`, specify these options: *`status`*:: A list of expected status codes. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`. *`headers`*:: The required response headers. -*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match. HTTP response +*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. HTTP response bodies of up to 100MiB are supported. Example configuration: From ba2b2f935f1c6badc316f62417d87d630991ad2f Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Wed, 21 Oct 2020 11:43:43 -0700 Subject: [PATCH 45/93] revert WSS process reporting for windows (#22055) * revert WSS process reporting for windows * add changelog --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/system/process/process.go | 15 +-------------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index f2750175969..059bbdb1cf6 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -378,6 +378,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix azure storage event format. {pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] *Packetbeat* diff --git a/metricbeat/module/system/process/process.go b/metricbeat/module/system/process/process.go index c99ffaa1123..141a4a3a62d 100644 --- a/metricbeat/module/system/process/process.go +++ b/metricbeat/module/system/process/process.go @@ -150,24 +150,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { rootFields.Put("process.args", args) } - // This is a temporary fix until we make these changes global across libbeat - // This logic should happen in libbeat getProcessEvent() - - // There's some more Windows memory quirks we need to deal with. - // "rss" is a linux concept, but "wss" is a direct match on Windows. - // "share" is also unavailable on Windows. + // "share" is unavailable on Windows. 
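// With the WSS remapping reverted, agent mode reports process.memory.rss on
// Windows again; the block below is only left to drop the unsupported
// memory.share field.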
if runtime.GOOS == "windows" { proc.Delete("memory.share") } - if m.IsAgent { - if runtime.GOOS == "windows" { - if setSize := getAndRemove(proc, "memory.rss"); setSize != nil { - proc.Put("memory.wss", setSize) - } - } - } - e := mb.Event{ RootFields: rootFields, MetricSetFields: proc, From 215f49cf50a079d5f0963eeab3d1336d897c36f9 Mon Sep 17 00:00:00 2001 From: Ichinose Shogo Date: Thu, 22 Oct 2020 17:03:50 +0900 Subject: [PATCH 46/93] Fix the url of reviewdog (#21981) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c21c0a7346e..d64bb07776b 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ GOLINT=golint GOLINT_REPO=golang.org/x/lint/golint REVIEWDOG=reviewdog REVIEWDOG_OPTIONS?=-diff "git diff master" -REVIEWDOG_REPO=github.com/haya14busa/reviewdog/cmd/reviewdog +REVIEWDOG_REPO=github.com/reviewdog/reviewdog/cmd/reviewdog XPACK_SUFFIX=x-pack/ # PROJECTS_XPACK_PKG is a list of Beats that have independent packaging support From 69cddaa1a0979a65c0bd8e3362ad69f5f9125652 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 22 Oct 2020 11:38:09 +0100 Subject: [PATCH 47/93] [build][packaging] Add resilience when docker build (#22050) --- dev-tools/mage/dockerbuilder.go | 9 ++++++++- x-pack/elastic-agent/magefile.go | 13 +++++++++++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go index 90a99434884..503fcae9cfc 100644 --- a/dev-tools/mage/dockerbuilder.go +++ b/dev-tools/mage/dockerbuilder.go @@ -26,6 +26,7 @@ import ( "os/exec" "path/filepath" "strings" + "time" "github.com/magefile/mage/sh" "github.com/pkg/errors" @@ -71,7 +72,13 @@ func (b *dockerBuilder) Build() error { tag, err := b.dockerBuild() if err != nil { - return errors.Wrap(err, "failed to build docker") + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not available. + time.Sleep(10) + tag, err = b.dockerBuild() + if err != nil { + return errors.Wrap(err, "failed to build docker") + } } if err := b.dockerSave(tag); err != nil { diff --git a/x-pack/elastic-agent/magefile.go b/x-pack/elastic-agent/magefile.go index a1aaba840fb..fad5ef935aa 100644 --- a/x-pack/elastic-agent/magefile.go +++ b/x-pack/elastic-agent/magefile.go @@ -513,8 +513,13 @@ func runAgent(env map[string]string) error { } // build docker image - if err := sh.Run("docker", "build", "-t", tag, "."); err != nil { - return err + if err := dockerBuild(tag); err != nil { + fmt.Println(">> Building docker images again (after 10 seconds)") + // This sleep is to avoid hitting the docker build issues when resources are not available. 
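// Note: time.Sleep takes a time.Duration, so the bare 10 passed below sleeps
// for ten nanoseconds rather than the ten seconds announced in the log line
// above; time.Sleep(10 * time.Second) would match that message.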
+ time.Sleep(10) + if err := dockerBuild(tag); err != nil { + return err + } } } @@ -625,6 +630,10 @@ func copyAll(from, to string) error { }) } +func dockerBuild(tag string) error { + return sh.Run("docker", "build", "-t", tag, ".") +} + func dockerTag() string { const commitLen = 7 tagBase := "elastic-agent" From 5553dc24d26e0a12119083a39df0a904dbb7e2d9 Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:38 +0200 Subject: [PATCH 48/93] docs: Prepare Changelog for 6.8.13 (#22072) (#22079) * docs: Close changelog for 6.8.13 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo # Conflicts: # CHANGELOG.asciidoc # libbeat/docs/release.asciidoc Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 40 +++++++++++++++++++++++++++++++++++ libbeat/docs/release.asciidoc | 4 ++++ 2 files changed, 44 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 1dfbb2fb889..5c364aeae64 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2575,6 +2575,46 @@ https://github.com/elastic/beats/compare/v6.5.0...v7.0.0-alpha1[View commits] - Added support to calculate certificates' fingerprints (MD5, SHA-1, SHA-256). {issue}8180[8180] - Support new TLS version negotiation introduced in TLS 1.3. {issue}8647[8647]. +[[release-notes-6.8.13]] +=== Beats version 6.8.13 +https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] + +==== Added + +*Filebeat* + +- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] + +[[release-notes-6.8.12]] +=== Beats version 6.8.12 +https://github.com/elastic/beats/compare/v6.8.11...v6.8.12[View commits] + +==== Bugfixes + +*Filebeat* + +- Fix Filebeat OOMs on very long lines {issue}19500[19500], {pull}19552[19552] + +[[release-notes-6.8.11]] +=== Beats version 6.8.11 +https://github.com/elastic/beats/compare/v6.8.10...v6.8.11[View commits] + +==== Bugfixes + +*Metricbeat* + +- Fix bug incorrect parsing of float numbers as integers in Couchbase module {issue}18949[18949] {pull}19055[19055] + +[[release-notes-6.8.10]] +=== Beats version 6.8.10 +https://github.com/elastic/beats/compare/v6.8.9...v6.8.10[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix `add_cloud_metadata` to better support modifying sub-fields with other processors. {pull}13808[13808] + [[release-notes-6.8.9]] === Beats version 6.8.9 https://github.com/elastic/beats/compare/v6.8.8...v6.8.9[View commits] diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 90dd214787a..caf94c3bf2d 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -39,6 +39,10 @@ upgrade. 
* <> * <> * <> +* <> +* <> +* <> +* <> * <> * <> * <> From 82c5855d965722f281ad611d11c01898084512bb Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Thu, 22 Oct 2020 13:23:51 +0200 Subject: [PATCH 49/93] docs: Prepare Changelog for 7.9.3 (#22073) (#22075) * docs: Close changelog for 7.9.3 * Apply suggestions from code review Co-authored-by: kuisathaverat Co-authored-by: Ivan Fernandez Calvo Co-authored-by: Elastic Machine --- CHANGELOG.asciidoc | 24 ++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 4 ++++ libbeat/docs/release.asciidoc | 1 + 3 files changed, 29 insertions(+) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 5c364aeae64..349eb49edb3 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,30 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-7.9.3]] +=== Beats version 7.9.3 +https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] + +*Auditbeat* + +- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] + +*Filebeat* + +- Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] +*Metricbeat* + +- Fix remote_write flaky test. {pull}21173[21173] +- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] +- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] + + [[release-notes-7.9.2]] === Beats version 7.9.2 https://github.com/elastic/beats/compare/v7.9.1...v7.9.2[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 059bbdb1cf6..a2dcaa48f2d 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -379,6 +379,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] - Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] +- Add a switch to the driver definition on SQL module to use pretty names {pull}17378[17378] *Packetbeat* @@ -821,3 +822,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index caf94c3bf2d..724d8af03c3 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> * <> * <> * <> From fb6d8ef3b7e3fb13af5cbd73220ac785aa50dead Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Thu, 22 Oct 2020 15:34:38 +0200 Subject: [PATCH 50/93] chore: use ubuntu 18 as linux agent (#22084) --- .ci/packaging.groovy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 91902595a3c..073c977a22e 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -43,7 +43,7 @@ pipeline { } stages { stage('Filter build') { - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } when { beforeAgent true anyOf { @@ -98,7 +98,7 @@ pipeline { } stages { stage('Package Linux'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } when { beforeAgent true @@ -160,7 +160,7 @@ pipeline { } } stage('Run E2E Tests for Packages'){ - agent { label 'ubuntu && immutable' } + agent { label 'ubuntu-18 && immutable' } options { skipDefaultCheckout() } steps { runE2ETests() From 9aefcfe692961b5cc309ad888d5960e83a3c25f8 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 22 Oct 2020 17:10:20 +0200 Subject: [PATCH 51/93] [Ingest Manager] Use symlink path for reexecutions (#21835) [Ingest Manager] Use symlink path for reexecutions (#21835) --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/cmd/run.go | 20 +++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 7088904a820..b6a870e0259 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -17,6 +17,7 @@ - Fix issue where inputs without processors defined would panic {pull}21628[21628] - Prevent reporting ecs version twice {pull}21616[21616] - Partial extracted beat result in failure to spawn beat {issue}21718[21718] +- Use symlink path for reexecutions {pull}21835[21835] - Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] - Use local temp instead of system one {pull}21883[21883] - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] diff --git a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go index 84dd8bd8a9a..b014cd69084 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/run.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "os/signal" + "path/filepath" "syscall" "github.com/spf13/cobra" @@ -26,6 +27,10 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) +const ( + agentName = "elastic-agent" +) + func newRunCommandWithArgs(flags *globalFlags, _ []string, streams *cli.IOStreams) *cobra.Command { return &cobra.Command{ Use: "run", @@ -87,7 +92,7 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se logger.Warn("Artifact has been build with security disabled. 
Elastic Agent will not verify signatures of used artifacts.") } - execPath, err := os.Executable() + execPath, err := reexecPath() if err != nil { return err } @@ -146,3 +151,16 @@ func run(flags *globalFlags, streams *cli.IOStreams) error { // Windows: Mark se rex.ShutdownComplete() return err } + +func reexecPath() (string, error) { + // set executable path to symlink instead of binary + // in case of updated symlinks we should spin up new agent + potentialReexec := filepath.Join(paths.Top(), agentName) + + // in case it does not exists fallback to executable + if _, err := os.Stat(potentialReexec); os.IsNotExist(err) { + return os.Executable() + } + + return potentialReexec, nil +} From daed8f9361d6c2708d84d3764a5c9ae52b042238 Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 22 Oct 2020 11:49:26 -0400 Subject: [PATCH 52/93] Remove suricata.eve.timestamp alias (#22095) Remove the suricata.eve.timestamp alias field from the Suricata module. This is a breaking change for anything that we dependent upon the field, but its presence caused issue in Kibana since it was always displayed in Discover. Fixes #10535 --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/fields.asciidoc | 9 --------- x-pack/filebeat/module/suricata/eve/_meta/fields.yml | 4 ---- x-pack/filebeat/module/suricata/fields.go | 2 +- 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index a2dcaa48f2d..1bf2cc8f762 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -80,6 +80,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add support for GMT timezone offsets in `decode_cef`. {pull}20993[20993] - Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] - API address and shard ID are required settings in the Cloud Foundry input. {pull}21759[21759] +- Remove `suricata.eve.timestamp` alias field. {issue}10535[10535] {pull}22095[22095] *Heartbeat* diff --git a/filebeat/docs/fields.asciidoc b/filebeat/docs/fields.asciidoc index b4f6a158ad7..b1ee49fed5c 100644 --- a/filebeat/docs/fields.asciidoc +++ b/filebeat/docs/fields.asciidoc @@ -143952,15 +143952,6 @@ type: keyword -- -*`suricata.eve.timestamp`*:: -+ --- -type: alias - -alias to: @timestamp - --- - *`suricata.eve.in_iface`*:: + -- diff --git a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml index 45980b888b0..dffb86e7ebe 100644 --- a/x-pack/filebeat/module/suricata/eve/_meta/fields.yml +++ b/x-pack/filebeat/module/suricata/eve/_meta/fields.yml @@ -176,10 +176,6 @@ - name: http_content_type type: keyword - - name: timestamp - type: alias - path: '@timestamp' - - name: in_iface type: keyword diff --git a/x-pack/filebeat/module/suricata/fields.go b/x-pack/filebeat/module/suricata/fields.go index 4cba3d5ee74..105704a1cc3 100644 --- a/x-pack/filebeat/module/suricata/fields.go +++ b/x-pack/filebeat/module/suricata/fields.go @@ -19,5 +19,5 @@ func init() { // AssetSuricata returns asset data. // This is the base64 encoded gzipped contents of module/suricata. 
func AssetSuricata() string { - return "eJzsXEuP67YV3t9fod2sYjRpExSzKLpJFwXaLgJ0SxyTRxJjvi5J2eP++kKyxyNbpCw+MEWTO6uBZ/jxvHhePPR3zQHPr40bLKfg4UvTeO4Fvja/fHzC0FHLjedavTZ/+dI0TfMPzQaBTatt04Nigquu8T02P//75+bvv/zrn43QnWuM1WygyJr9+Ya3+9I0LUfB3OuE9F2jQOIdBeOPPxt8bTqrB3P9JEDF+PO3CatprZYTBe/7TKQI3TUtF7i7/vt84/nmeMTbZ6G9V/af0YBvRlt/YXchjNmCRyoeKFGejBTc/fmdqAOeT9qy29+CGGAMMVZ7TbTlXT6Opya4+FEyMZ7ukEgr4E4Km8n5gDGujwDstRYI6hnAjQ7iaRkpQA9lpLizKgTw4B+NJJGJmTgKNWOdL+Om5Vni+FgukKtWV7JX18P3ZQIZCRp/i6CA4LAUuQHfX5buxl+fqu+NcBbZQGjVfYIFOa8txmjYqPoOTMz6th6FHn748acyTiT7sVAU/D/Z2r5bGzRwTqWJh4W4tt/XM3SejOEpuD5E4IW8cR1XMEa93d3y4C7O0pxNnB4sxQ34U1RLBFfoT9oedt6Cchu2oGAIVWEWnst5lAAPh82n/M/WhXWoHpdnx+OlIaU6fFsDY8VDbsVgHzljJkSpG/VeFKy2VLNCGWRQfwtTQp+W3CfkiCiBP7KfHXU9+CErEbnzcsnHb+7jnp3ByQ8HVLZV4L33tZLqVXGthZyRhp1FZ7RyuLvA3PMUs1Vk3CKNpXobzXXcngwOLYEOF452CwMfi3djhcMViGebTpGD6thJTSHdYos2X+xfB3R+N4HYGU5kx8HGSF4VkBWbJdNr5zMz1XEbpiXwpxmaQNX5WPG23Vb3mp13+7NHt0lTEn2vY85xs6oeUNY2pFr5SP2eVnlzic6DTPVkL3+9rXx54scU4S3Qki6DwEiil+7KKHjstD0XZuB4RMt9DGVN4VPbZbcAiPrBY0HE78qKNt4p8IMtzBmAjuEuW1J68FTLpwHjRmxOhnUDWXR9ss1M8HjACcHFAB/iCjmidSFxblTKjFfd+hNYLEX8OBL2GA1WvyGO5zlRrSKJglk5Z3nCO6BVKIgBesBAz22LI1iAMasDDZQsKN7mg90yFjPGoAIPx9DHc8w8sYdiVRpRgby9jCSJksJyWZraJMrBLWWdwljLBZKpjVeVPW1QkRG7zJgYthYerzAKBQ9vZAQlPS8+gdwc/xSFCJMXJ/A+/dNDkLpt9M3Ua6GTqKpgWQTnUO5FoOO7FW0muWXD9pvknh9XoU9Vj4OnY0Wb60Vmbp9VcGblHhEl2o5IzZCg8hi6nMgG1KFUKg1teaWaisCpXDs6CSBxz7UNxBkIBI1PD2AhkRbltw4HpkkLPHxak0TkFLmY9ZSllWpNObSeMPAw2aIAM5FZSiS6MfEujYUXuZXSclahm/V8OSltJYgqYgpc2H+asc+ZO4LgjNAe6cENslj5U5CraKVXy6yjQ8Gdr6G80HxCoqC8RZCEofE9sQi0L/YOt6zgTKqYxgyvK1bAu4NhvG1J8JItDU9pEsxd0qoBqlnldgYcO2IOngSv79NYvG9M52GUJwgW8oQ88+SmlIajAEW+cvW1EOdFDUK8FIIIP6JEQfKLjUG5wVxG/sId/62UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8T4YpUoIabtYOab5Tv+QE3Kx3VZH+g//flIVtcVjUVxFX3wI1+rzTPoIFGY2KeCW+1QtpUQZcm9dKI0oPnVhz4/1vdjNaZYjNB36NVWJrVjp65lkNDxD//4YfvYXnNn5TjBQrR8j476YTeQ6kNXbHCc7Of37kX+kRkVzcdtvrkyH5wy0v0NP5G4hy5tmOrYCldB23i0B24McUFHBXaISPGDqoYS+GpDtBFWhalPhZj7c9mLCkrsTiNINdj8WoOhKvy0vmCOPVUysv6EUrCm8BSJ28rE4XSBKZjUnwOGEMEnCvX4KuNgYLLoGDakXwPFMzHUlGC98cZxExdaRLOplPBmHIVkcKdi1QkJ2vKqQ5N0uXXUGtjTDkoXAY7e8kKo2gNraX9C1gtA9hno9wO/ts3b/I7dwB1Tttv75jErTl+PtbH5JT2e2zj80NsXixFMLhzA1oWm0fdOuyoeBlArGGWRsXlOpVYdIMsfRvZctWhNZZHx1a3D4LyQP2bhKG0h9ZHc78tinbD/tfiByS/wh/TegQMWxiEJ5Mhj8mBCBQMzyOA85ar5TBanP4lRA+rfmm7AH6H/M9rkMsQcmTUdxNKoObI9YDTvR/xenW8fG2mfv4AbdPrFuchMM/6fKPrM4e71ZEtzMFX4+lx3DmyJXQlY8MVXrO/K3J1an6N6etD3k06tAiuNNZclVRK70b9oIpFsy2uf5rBzouHi5MfPLjPn4AvHgGGU6PAe+CtadXMNz1+wUiCbwrkwLm+yVLjyUJgiWYmgQvSWr0cNkqC6VFkEbIULr4ZpEtjSnL/y2+jeS7j/wYAAP//9F32EA==" + return 
"eJzsXEuP47gRvs+v0G1Pa2Q32UXQh9w2hwBJDgvkSpTJksQxX0NSdju/PpDsdssWKYsPdLAz06eGu/mxXqwXi/6xOeD5pXGD5RQ8fGoaz73Al+b3908YOmq58Vyrl+Zvn5qmaf6p2SCwabVtelBMcNU1vsfmt//81vzj93//qxG6c42xmg0UWbM/3/B2n5qm5SiYe5mQfmwUSLyjYPzxZ4MvTWf1YK6fBKgYf/4+YTWt1XKi4G2fiRShu6blAnfXf59vPN8cj3j7LLT3yv4zGvDVaOsv7C6EMVvwSMUDJcqTkYK7P78RdcDzSVt2+1sQA4whxmqviba8y8fx1AQXP0omxtMdEmkF3ElhMznvMMb1EYC91gJBPQO40UE8LSMF6KGMFHdWhQAe/KORJDIxE0ehZqzzZdy0PEsc78sFctXqSvbqevipTCAjQeNvERQQHJYiN+D7y9Ld+OtT9b0SziIbCK26D7Ag57XFGA0bVd+BiVnf1qPQw8+//FrGiWS/FIqC/zdb23drgwbOqTTxsBDX9tt6hs6TMTwF14cIvJA3ruMKxqi3u1se3MVZmrOJ04OluAF/imqJ4Ar9SdvDzltQbsMWFAyhKszCczmPEuDhsPmU/9m6sA7V4/LseLw0pFSHb2tgrHjIrRjsPWfMhCh1o96LgtWWalYogwzqb2FK6NOS+4QcESXwR/azo64HP2QlIndeLvn4zX3cszM4+eGAyrYKvPe+VlK9Kq61kDPSsLPojFYOdxeYe55itoqMW6SxVG+juY7bk8GhJdDhwtFuYeB98W6scLgC8WzTKXJQHTupKaRbbNHmi/3LgM7vJhA7w4nsONgYyasCsmKzZHrtfGamOm7DtAT+NEMTqDofK9622+pes/Nuf/boNmlKou91zDluVtUDytqGVCsfqd+T/BhXhLdAS3oAAiNpWLqjoeCx0/ZcmB/jES33MZQ1dUxNkd0CIOqljgXxuCsrqXinwA+2MKIDHYNRtqT04KmWT935jdic/OcGsujJZJuZ4PFwEIKLAT54fXJE60Li3KiUGa+69SewWIr4fiTsMRpKviKO5xlLrRKGglk5Z3nCO6BVKIgBesBAR2yLI1iAMasD7Y0sKN7mg93yCeN5NNZvAWDo4xlgnthDsSqNqEBWXUaSRElhuSxNbRLl4JayTmGs5QLJ1GSryp42qMiIXWZMDFsLjxcMhYKHVzKCkp4Xn0Bujn+JQoTJixM4q/e5RD0EqdtG30y9FjqJqgqWRXAO5V4E+rFb0WaSW7ZTv0vu+XEV+lT1OHg61pu5XmTm9lkFZ1buEVGi7YjUDAkqj6Grg2xAHUql0tCWF56pCJzKtaOTABL3XNtAnIFA0PjwABYSaVF+63BgmrTAw6c1SUROkYtZT1laqdaUQ+sJAw+TLQowE5mlRKIbE+/SWHiRWyktZxW6986Xk9JWgqgipsB1+ocZ+5y5IwjOCO2RHtwgi5U/BbmKVnq1zDo6FNz5GsoLTQ8kCspbBEkYGt8Ti0D7Yu9wywrOpIppzPC6YgW8ORjG25YEr8DS8JQmwdwlrRqgmlVuZ8CxI+bgSfByPY3F+7ZxHkZ5gmAhT8gzT25KaTgKUOQLV18KcX5QgxA/FIIIP6JEQfKLjUG5wVwG8sL9+K2UzkR/8MRrTZyEIMlZsaJQfl1xlrdaJleoFzea9aFGvf8r4YpUoIabtYOab5Rv+QE3Kx3VZH+g///lIVtcVjUVxFX3wI1+rzTPoIFGY2KeCa+1QtpUQZcm9dKI0oPnVhz4H61uRutMsZmg79EqLM1qR89cy6Eh4l//9PNPsLyET8rxAoVoeZ+ddELvodSGrljhqdaP79wLfSKyq5sOW31yZD+45SV6Gn8jcY5c27FVsJSugzZx6A7cmOICjgrtkBFjB1WMpfBUB+giLYtSH4ux9mczlpSVWJwGhOuxeDUHwlV56XxBnHoq5WX9CCXhVWCpk7eViUJpAtMxKT4HjCECzpVr8NXGQMFlUDDtSL4HCuZjqSjB++MMYqauNAln06lgTLmKSOHORSqSkzXlVIcm6fJrqLUxphwULoOdvWSFUbSG1tL+BayWAeyzUW4H//W7N/nGHUCd0/b1HZO4NcfPx/qYnNJ+j218fojNi6UIBnduQMti86hbhx0VLwOINczSqLhcpxKLbpClLxdbrjq0xvLo2Or2QVAeqH+TMJT20Ppo7rdF0W7Yfy5+3vEZ/pzWI2DYwiA8mQx5TA5EoGB4HgGct1wth9Hi9C8helj1S9sF8A3yP69BLkPIkVHfTSiBmiPXA073fsTr1fHytZn6+fOwTW9PnIfAPOvzja7PHO5WR7YwB1+Np8dx58iW0JWMDVd4a/6myNWp+TWmr89sN+nQIrjSWHNVUim9G/WDKhbNtrj+aQY7Lx4uTn7w4D5/oL14ohdOjQKvdbemVTPf9Pj1Hwm+KZAD5/omS40nC4ElmpkELkhr9XLYKAmmR5FFyFK4+GqQLo0pyf0vvyvmuYz/FwAA//8GEN89" } From 5d077092d3e0aacfecae81ea307a3c6fda748705 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 22 Oct 2020 11:34:32 -0600 Subject: [PATCH 53/93] Add max_number_of_messages into aws filebeat fileset vars (#22057) --- x-pack/filebeat/filebeat.reference.yml | 36 +++++++++++++++++++ x-pack/filebeat/module/aws/_meta/config.yml | 36 +++++++++++++++++++ .../module/aws/cloudtrail/config/s3.yml | 4 +++ .../module/aws/cloudtrail/manifest.yml | 2 ++ .../module/aws/cloudwatch/config/s3.yml | 4 +++ .../module/aws/cloudwatch/manifest.yml | 2 ++ x-pack/filebeat/module/aws/ec2/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/ec2/manifest.yml | 2 ++ x-pack/filebeat/module/aws/elb/config/s3.yml | 4 +++ x-pack/filebeat/module/aws/elb/manifest.yml | 2 ++ .../module/aws/s3access/config/s3.yml | 4 +++ .../filebeat/module/aws/s3access/manifest.yml | 2 ++ .../module/aws/vpcflow/config/input.yml | 4 +++ .../filebeat/module/aws/vpcflow/manifest.yml | 2 ++ x-pack/filebeat/modules.d/aws.yml.disabled | 36 +++++++++++++++++++ 15 files 
changed, 144 insertions(+) diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 49ede1c7d24..f10a46aa20e 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -142,6 +142,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -176,6 +182,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -210,6 +222,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -244,6 +262,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -278,6 +302,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -312,6 +342,12 @@ filebeat.modules: # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + #-------------------------------- Azure Module -------------------------------- - module: azure # All logs diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index b7e0c25b674..e4b521e467f 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -45,6 +45,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -79,6 +85,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + ec2: enabled: false @@ -113,6 +125,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -147,6 +165,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -181,6 +205,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -214,3 +244,9 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml index ac1caacf21c..d11da6c6a52 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/s3.yml @@ -55,6 +55,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index 732967ff0b0..03c7acf1336 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -21,6 +21,8 @@ var: default: true - name: process_insight_logs default: true + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/ec2/config/s3.yml b/x-pack/filebeat/module/aws/ec2/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/ec2/config/s3.yml +++ b/x-pack/filebeat/module/aws/ec2/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/ec2/manifest.yml b/x-pack/filebeat/module/aws/ec2/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/ec2/manifest.yml +++ b/x-pack/filebeat/module/aws/ec2/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/elb/config/s3.yml b/x-pack/filebeat/module/aws/elb/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/elb/config/s3.yml +++ b/x-pack/filebeat/module/aws/elb/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags 
"forwarded" }} diff --git a/x-pack/filebeat/module/aws/elb/manifest.yml b/x-pack/filebeat/module/aws/elb/manifest.yml index f823ccbacce..dc95f6abb7e 100644 --- a/x-pack/filebeat/module/aws/elb/manifest.yml +++ b/x-pack/filebeat/module/aws/elb/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/s3access/config/s3.yml b/x-pack/filebeat/module/aws/s3access/config/s3.yml index bdb0ff350f0..7364f997a65 100644 --- a/x-pack/filebeat/module/aws/s3access/config/s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/s3.yml @@ -41,6 +41,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index 2878c79936d..5d9931b2e40 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index 628196b7d3e..de4affbd694 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -43,6 +43,10 @@ role_arn: {{ .role_arn }} fips_enabled: {{ .fips_enabled }} {{ end }} +{{ if .max_number_of_messages }} +max_number_of_messages: {{ .max_number_of_messages }} +{{ end }} + {{ else if eq .input "file" }} type: log diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index c7df14a4050..19f40c7a3f7 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -15,6 +15,8 @@ var: - name: role_arn - name: tags default: [forwarded] + - name: fips_enabled + - name: max_number_of_messages ingest_pipeline: ingest/pipeline.yml input: config/input.yml diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 0fe8465211b..f3d2ac1f7c9 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -48,6 +48,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + cloudwatch: enabled: false @@ -82,6 +88,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
+ #var.max_number_of_messages: 5 + ec2: enabled: false @@ -116,6 +128,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + elb: enabled: false @@ -150,6 +168,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + s3access: enabled: false @@ -184,6 +208,12 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 + vpcflow: enabled: false @@ -217,3 +247,9 @@ # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb + + # Enabling this option changes the service name from `s3` to `s3-fips` for connecting to the correct service endpoint. + #var.fips_enabled: false + + # The maximum number of messages to return from SQS. Valid values: 1 to 10. + #var.max_number_of_messages: 5 From cc2217ced1dd549dbbed0abbd048caac6150ecf7 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 22 Oct 2020 12:45:17 -0600 Subject: [PATCH 54/93] Check context.Canceled and fix s3 input config (#22036) --- .../_meta/config/filebeat.inputs.reference.xpack.yml.tmpl | 4 ++-- x-pack/filebeat/filebeat.reference.yml | 4 ++-- x-pack/filebeat/input/s3/collector.go | 2 ++ x-pack/filebeat/input/s3/input.go | 8 +++++++- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl index 16964b2c84e..f083b4c814b 100644 --- a/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl +++ b/x-pack/filebeat/_meta/config/filebeat.inputs.reference.xpack.yml.tmpl @@ -67,8 +67,8 @@ #session_token: '${AWS_SESSION_TOKEN:"”}' #credential_profile_name: test-s3-input - # Queue urls (required) to receive queue messages from - #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"] + # Queue url (required) to receive queue messages from + #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue" # The duration (in seconds) that the received messages are hidden from subsequent # retrieve requests after being retrieved by a ReceiveMessage request. 
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index f10a46aa20e..80bfacbf2c3 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -2410,8 +2410,8 @@ filebeat.inputs: #session_token: '${AWS_SESSION_TOKEN:"”}' #credential_profile_name: test-s3-input - # Queue urls (required) to receive queue messages from - #queue_urls: ["https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue"] + # Queue url (required) to receive queue messages from + #queue_url: "https://sqs.us-east-1.amazonaws.com/1234/test-s3-logs-queue" # The duration (in seconds) that the received messages are hidden from subsequent # retrieve requests after being retrieved by a ReceiveMessage request. diff --git a/x-pack/filebeat/input/s3/collector.go b/x-pack/filebeat/input/s3/collector.go index c3d3114c723..9596b5ab23f 100644 --- a/x-pack/filebeat/input/s3/collector.go +++ b/x-pack/filebeat/input/s3/collector.go @@ -153,8 +153,10 @@ func (c *s3Collector) processorKeepAlive(svcSQS sqsiface.ClientAPI, message sqs. for { select { case <-c.cancellation.Done(): + fmt.Println("------- c.cancellation.Done()") return nil case err := <-errC: + fmt.Println("------- err = ", err) if err != nil { if err == context.DeadlineExceeded { c.logger.Info("Context deadline exceeded, updating visibility timeout") diff --git a/x-pack/filebeat/input/s3/input.go b/x-pack/filebeat/input/s3/input.go index 36f160d759e..a3f19f66327 100644 --- a/x-pack/filebeat/input/s3/input.go +++ b/x-pack/filebeat/input/s3/input.go @@ -5,6 +5,7 @@ package s3 import ( + "context" "fmt" "github.com/aws/aws-sdk-go-v2/service/s3" @@ -67,7 +68,12 @@ func (in *s3Input) Run(ctx v2.Context, pipeline beat.Pipeline) error { defer collector.publisher.Close() collector.run() - return ctx.Cancelation.Err() + + if ctx.Cancelation.Err() == context.Canceled { + return nil + } else { + return ctx.Cancelation.Err() + } } func (in *s3Input) createCollector(ctx v2.Context, pipeline beat.Pipeline) (*s3Collector, error) { From f33bfd9b5be3b1f9287b22c575d7f9a057eebb96 Mon Sep 17 00:00:00 2001 From: Brandon Morelli Date: Thu, 22 Oct 2020 12:46:36 -0700 Subject: [PATCH 55/93] docs: move kerberos include (#22109) --- auditbeat/docs/configuring-howto.asciidoc | 4 ++++ filebeat/docs/configuring-howto.asciidoc | 4 ++++ heartbeat/docs/configuring-howto.asciidoc | 4 ++++ journalbeat/docs/configuring-howto.asciidoc | 4 ++++ libbeat/docs/outputs-list.asciidoc | 4 ---- metricbeat/docs/configuring-howto.asciidoc | 4 ++++ packetbeat/docs/configuring-howto.asciidoc | 4 ++++ winlogbeat/docs/configuring-howto.asciidoc | 4 ++++ x-pack/functionbeat/docs/configuring-howto.asciidoc | 4 ++++ 9 files changed, 32 insertions(+), 4 deletions(-) diff --git a/auditbeat/docs/configuring-howto.asciidoc b/auditbeat/docs/configuring-howto.asciidoc index 745c58c7997..65938efb9c7 100644 --- a/auditbeat/docs/configuring-howto.asciidoc +++ b/auditbeat/docs/configuring-howto.asciidoc @@ -42,6 +42,10 @@ include::./reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/filebeat/docs/configuring-howto.asciidoc b/filebeat/docs/configuring-howto.asciidoc index ec70fe23942..f09902a0d26 100644 --- a/filebeat/docs/configuring-howto.asciidoc +++ b/filebeat/docs/configuring-howto.asciidoc @@ -44,6 +44,10 @@ 
include::./reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::../../libbeat/docs/shared-ilm.asciidoc[] diff --git a/heartbeat/docs/configuring-howto.asciidoc b/heartbeat/docs/configuring-howto.asciidoc index f562b8a42c1..fa312e5d4ac 100644 --- a/heartbeat/docs/configuring-howto.asciidoc +++ b/heartbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/journalbeat/docs/configuring-howto.asciidoc b/journalbeat/docs/configuring-howto.asciidoc index 93083ac4ccc..246880468e3 100644 --- a/journalbeat/docs/configuring-howto.asciidoc +++ b/journalbeat/docs/configuring-howto.asciidoc @@ -34,6 +34,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/libbeat/docs/outputs-list.asciidoc b/libbeat/docs/outputs-list.asciidoc index bd3b2878aa6..4181c10f64f 100644 --- a/libbeat/docs/outputs-list.asciidoc +++ b/libbeat/docs/outputs-list.asciidoc @@ -83,9 +83,5 @@ ifdef::requires_xpack[] endif::[] include::{libbeat-outputs-dir}/codec/docs/codec.asciidoc[] endif::[] -ifndef::no_kerberos[] -include::{libbeat-dir}/shared-kerberos-config.asciidoc[] -endif::[] - //# end::outputs-include[] diff --git a/metricbeat/docs/configuring-howto.asciidoc b/metricbeat/docs/configuring-howto.asciidoc index 60f8928df53..dcacba01f79 100644 --- a/metricbeat/docs/configuring-howto.asciidoc +++ b/metricbeat/docs/configuring-howto.asciidoc @@ -40,6 +40,10 @@ include::{docdir}/../docs/reload-configuration.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/packetbeat/docs/configuring-howto.asciidoc b/packetbeat/docs/configuring-howto.asciidoc index cc9e3c9a926..8d27edbafd7 100644 --- a/packetbeat/docs/configuring-howto.asciidoc +++ b/packetbeat/docs/configuring-howto.asciidoc @@ -38,6 +38,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/winlogbeat/docs/configuring-howto.asciidoc b/winlogbeat/docs/configuring-howto.asciidoc index 5c1c6086ace..5d9d4758cf8 100644 --- a/winlogbeat/docs/configuring-howto.asciidoc +++ b/winlogbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::{libbeat-dir}/shared-path-config.asciidoc[] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::{libbeat-dir}/shared-ilm.asciidoc[] diff --git a/x-pack/functionbeat/docs/configuring-howto.asciidoc 
b/x-pack/functionbeat/docs/configuring-howto.asciidoc index 192cb79fea3..3d72f9b5a55 100644 --- a/x-pack/functionbeat/docs/configuring-howto.asciidoc +++ b/x-pack/functionbeat/docs/configuring-howto.asciidoc @@ -35,6 +35,10 @@ include::./general-options.asciidoc[] [role="xpack"] include::{libbeat-dir}/outputconfig.asciidoc[] +ifndef::no_kerberos[] +include::{libbeat-dir}/shared-kerberos-config.asciidoc[] +endif::[] + [role="xpack"] include::{libbeat-dir}/shared-ssl-config.asciidoc[] From 2e7b90217e54016d9613ae0a1f58cba8a82cba00 Mon Sep 17 00:00:00 2001 From: Fae Charlton Date: Thu, 22 Oct 2020 15:57:56 -0400 Subject: [PATCH 56/93] [libbeat] Add more disk queue unit tests and fix a size-check bug (#22107) --- .../publisher/queue/diskqueue/core_loop.go | 24 +- .../queue/diskqueue/core_loop_test.go | 623 ++++++++++++++++-- libbeat/publisher/queue/diskqueue/queue.go | 5 + 3 files changed, 594 insertions(+), 58 deletions(-) diff --git a/libbeat/publisher/queue/diskqueue/core_loop.go b/libbeat/publisher/queue/diskqueue/core_loop.go index 77f4aadb47f..ac6e22c52d8 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop.go +++ b/libbeat/publisher/queue/diskqueue/core_loop.go @@ -93,10 +93,10 @@ func (dq *diskQueue) handleProducerWriteRequest(request producerWriteRequest) { // than an entire segment all by itself (as long as it isn't, it is // guaranteed to eventually enter the queue assuming no disk errors). frameSize := request.frame.sizeOnDisk() - if dq.settings.MaxSegmentSize < frameSize { + if dq.settings.maxSegmentOffset() < segmentOffset(frameSize) { dq.logger.Warnf( - "Rejecting event with size %v because the maximum segment size is %v", - frameSize, dq.settings.MaxSegmentSize) + "Rejecting event with size %v because the segment buffer limit is %v", + frameSize, dq.settings.maxSegmentOffset()) request.responseChan <- false return } @@ -326,13 +326,19 @@ func (dq *diskQueue) maybeWritePending() { // Nothing to do right now return } + // Remove everything from pendingFrames and forward it to the writer loop. frames := dq.pendingFrames dq.pendingFrames = nil + dq.writerLoop.requestChan <- writerLoopRequest{frames: frames} - dq.writerLoop.requestChan <- writerLoopRequest{ - frames: frames, + // Compute the size of the request so we know how full the queue is going + // to be. + totalSize := uint64(0) + for _, sf := range frames { + totalSize += sf.frame.sizeOnDisk() } + dq.writeRequestSize = totalSize dq.writing = true } @@ -471,8 +477,12 @@ func (dq *diskQueue) canAcceptFrameOfSize(frameSize uint64) bool { // left in the queue after accounting for the existing segments and the // pending writes that were already accepted. pendingBytes := uint64(0) - for _, request := range dq.pendingFrames { - pendingBytes += request.frame.sizeOnDisk() + for _, sf := range dq.pendingFrames { + pendingBytes += sf.frame.sizeOnDisk() + } + // If a writing request is outstanding, include it in the size total. 
+ if dq.writing { + pendingBytes += dq.writeRequestSize } currentSize := pendingBytes + dq.segments.sizeOnDisk() diff --git a/libbeat/publisher/queue/diskqueue/core_loop_test.go b/libbeat/publisher/queue/diskqueue/core_loop_test.go index 309a145968d..1eb9ff54a15 100644 --- a/libbeat/publisher/queue/diskqueue/core_loop_test.go +++ b/libbeat/publisher/queue/diskqueue/core_loop_test.go @@ -24,76 +24,267 @@ import ( "github.com/elastic/beats/v7/libbeat/logp" ) -func TestProducerWriteRequest(t *testing.T) { - dq := &diskQueue{settings: DefaultSettings()} - frame := &writeFrame{ - serialized: make([]byte, 100), - } - request := producerWriteRequest{ - frame: frame, - shouldBlock: true, - responseChan: make(chan bool, 1), - } - dq.handleProducerWriteRequest(request) - - // The request inserts 100 bytes into an empty queue, so it should succeed. - // We expect: - // - the response channel should contain the value true - // - the frame should be added to pendingFrames and assigned to - // segment 0. - success, ok := <-request.responseChan - if !ok { - t.Error("Expected a response from the producer write request.") - } - if !success { - t.Error("Expected write request to succeed") - } +func TestHandleProducerWriteRequest(t *testing.T) { + // handleProducerWriteRequest should: + // - Immediately reject any frame larger than settings.MaxSegmentSize. + // - If dq.blockedProducers is nonempty (indicating that other frames are + // already waiting for empty space in the queue), or the queue doesn't + // have room for the new frame (see canAcceptFrameOfSize), then it is + // appended to blockedProducers if request.shouldBlock is true, and + // otherwise is rejected immediately. + // - Otherwise, the request is assigned a target segment and appended + // to pendingFrames. + // * If the frame fits in the current writing segment, it is assigned + // to that segment. Otherwise, it is assigned to segments.nextID + // and segments.nextID is incremented (see enqueueWriteFrame). - if len(dq.pendingFrames) != 1 { - t.Error("Expected 1 pending frame after a write request.") - } - if dq.pendingFrames[0].frame != frame { - t.Error("Expected pendingFrames to contain the new frame.") + // For this test setup, the queue is initialized with a max segment + // offset of 1000 and a max total size of 10000. + testCases := map[string]struct { + // The segment structure to start with before calling + // handleProducerWriteRequest + segments diskQueueSegments + + // Whether the blockedProducers list should be nonempty in the + // initial queue state. + blockedProducers bool + + // The size of the frame to send in the producer write request + frameSize int + + // The value to set shouldBlock to in the producer write request + shouldBlock bool + + // The result we expect on the requests's response channel, or + // nil if there should be none. + expectedResult *bool + + // The segment the frame should be assigned to in pendingFrames. + // This is ignored unless expectedResult is &true. + expectedSegment segmentID + }{ + "accept single frame when empty": { + segments: diskQueueSegments{nextID: 5}, + frameSize: 1000, + shouldBlock: false, + expectedResult: boolRef(true), + expectedSegment: 5, + }, + "reject immediately when frame is larger than segment limit": { + // max segment buffer size for the test wrapper is 1000. 
+ frameSize: 1001, + shouldBlock: true, + expectedResult: boolRef(false), + }, + "accept with frame in new segment if current segment is full": { + segments: diskQueueSegments{ + writing: []*queueSegment{{}}, + nextWriteOffset: 600, + nextID: 1, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(true), + expectedSegment: 1, + }, + "reject when full and shouldBlock=false": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when full and shouldBlock=true": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {endOffset: 9600}, + }, + }, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, + "reject when blockedProducers is nonempty and shouldBlock=false": { + blockedProducers: true, + frameSize: 500, + shouldBlock: false, + expectedResult: boolRef(false), + }, + "block when blockedProducers is nonempty and shouldBlock=true": { + blockedProducers: true, + frameSize: 500, + shouldBlock: true, + expectedResult: nil, + }, } - if dq.pendingFrames[0].segment.id != 0 { - t.Error("Expected new frame to be assigned to segment 0.") + + settings := DefaultSettings() + settings.MaxSegmentSize = 1000 + segmentHeaderSize + settings.MaxBufferSize = 10000 + for description, test := range testCases { + dq := &diskQueue{ + logger: logp.L(), + settings: settings, + segments: test.segments, + } + if test.blockedProducers { + // Set an empty placeholder write request + dq.blockedProducers = []producerWriteRequest{{}} + } + initialBlockedProducerCount := len(dq.blockedProducers) + + // Construct a frame of the requested size. We subtract the + // metadata size from the buffer length, so test.frameSize + // corresponds to the "real" on-disk size of the frame. + request := producerWriteRequest{ + frame: makeWriteFrameWithSize(test.frameSize), + shouldBlock: test.shouldBlock, + responseChan: make(chan bool, 1), + } + + dq.handleProducerWriteRequest(request) + + var result *bool + select { + case r := <-request.responseChan: + result = &r + default: + // No response, result can stay nil. + } + + // Check that the result itself is correct. + if result != nil && test.expectedResult != nil { + if *result != *test.expectedResult { + t.Errorf("%s: expected response %v, got %v", + description, *test.expectedResult, *result) + } + } else if result == nil && test.expectedResult != nil { + t.Errorf("%s: expected response %v, got none", + description, *test.expectedResult) + } else if result != nil && test.expectedResult == nil { + t.Errorf("%s: expected no response, got %v", + description, *result) + } + // Check whether the request was added to blockedProducers. + if test.expectedResult != nil && + len(dq.blockedProducers) > initialBlockedProducerCount { + // Requests with responses shouldn't be added to + // blockedProducers. + t.Errorf("%s: request shouldn't be added to blockedProducers", + description) + } else if test.expectedResult == nil && + len(dq.blockedProducers) <= initialBlockedProducerCount { + // Requests without responses should be added to + // blockedProducers. + t.Errorf("%s: request should be added to blockedProducers", + description) + } + // Check whether the frame was added to pendingFrames. 
+ var lastPendingFrame *segmentedFrame + if len(dq.pendingFrames) != 0 { + lastPendingFrame = &dq.pendingFrames[len(dq.pendingFrames)-1] + } + if test.expectedResult != nil && *test.expectedResult { + // If the result is success, the frame should now be + // enqueued. + if lastPendingFrame == nil || + lastPendingFrame.frame != request.frame { + t.Errorf("%s: frame should be added to pendingFrames", + description) + } else if lastPendingFrame.segment.id != test.expectedSegment { + t.Errorf("%s: expected frame to be in segment %v, got %v", + description, test.expectedSegment, + lastPendingFrame.segment.id) + } + // Check that segments.nextID is one more than the segment that + // was just assigned. + if lastPendingFrame != nil && + dq.segments.nextID != test.expectedSegment+1 { + t.Errorf("%s: expected segments.nextID to be %v, got %v", + description, test.expectedSegment+1, dq.segments.nextID) + } + } } } func TestHandleWriterLoopResponse(t *testing.T) { - // Initialize the queue with two writing segments only. + // handleWriterLoopResponse should: + // - Add the values in the bytesWritten array, in order, to the endOffset + // of the segments in segments.writing (these represent the amount + // written to each segment as a result of the preceding writer loop + // request). + // - If bytesWritten covers more than one writing segment, then move + // all except the last one from segments.writing to segments.reading. + // These invariants are relatively simple so this test is "by hand" + // rather than using a structured list of sub-cases. + dq := &diskQueue{ settings: DefaultSettings(), segments: diskQueueSegments{ writing: []*queueSegment{ - {id: 1}, + {id: 1, endOffset: 100}, {id: 2}, + {id: 3}, + {id: 4}, }, }, } - // This response says that the writer loop wrote 200 bytes to the first - // segment and 100 bytes to the second. 
+ + // Write to one segment (no segments should be moved to reading list) dq.handleWriterLoopResponse(writerLoopResponse{ - bytesWritten: []int64{200, 100}, + bytesWritten: []int64{100}, }) - - // After the response is handled, we expect: - // - Each segment's endOffset should be incremented by the bytes written - // - Segment 1 should be moved to the reading list (because all but the - // last segment in a writer loop response has been closed) - // - Segment 2 should remain in the writing list - if len(dq.segments.reading) != 1 || dq.segments.reading[0].id != 1 { - t.Error("Expected segment 1 to move to the reading list") + if len(dq.segments.writing) != 4 || len(dq.segments.reading) != 0 { + t.Fatalf("expected 4 writing and 0 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if len(dq.segments.writing) != 1 || dq.segments.writing[0].id != 2 { - t.Error("Expected segment 2 to remain in the writing list") + if dq.segments.writing[0].endOffset != 200 { + t.Errorf("expected first writing segment to be size 200, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to two segments (the first one should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100}, + }) + if len(dq.segments.writing) != 3 || len(dq.segments.reading) != 1 { + t.Fatalf("expected 3 writing and 1 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) } - if dq.segments.reading[0].endOffset != 200 { - t.Errorf("Expected segment 1 endOffset 200, got %d", + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", dq.segments.reading[0].endOffset) } if dq.segments.writing[0].endOffset != 100 { - t.Errorf("Expected segment 2 endOffset 100, got %d", + t.Errorf("expected first writing segment to be size 100, got %v", + dq.segments.writing[0].endOffset) + } + + // Write to three segments (the first two should be moved to reading list) + dq.handleWriterLoopResponse(writerLoopResponse{ + bytesWritten: []int64{100, 100, 500}, + }) + if len(dq.segments.writing) != 1 || len(dq.segments.reading) != 3 { + t.Fatalf("expected 1 writing and 3 reading segments, got %v writing "+ + "and %v reading", len(dq.segments.writing), len(dq.segments.reading)) + } + if dq.segments.reading[0].endOffset != 300 { + t.Errorf("expected first reading segment to be size 300, got %v", + dq.segments.reading[0].endOffset) + } + if dq.segments.reading[1].endOffset != 200 { + t.Errorf("expected second reading segment to be size 200, got %v", + dq.segments.reading[1].endOffset) + } + if dq.segments.reading[2].endOffset != 100 { + t.Errorf("expected third reading segment to be size 100, got %v", + dq.segments.reading[2].endOffset) + } + if dq.segments.writing[0].endOffset != 500 { + t.Errorf("expected first writing segment to be size 500, got %v", dq.segments.writing[0].endOffset) } } @@ -111,7 +302,8 @@ func TestHandleReaderLoopResponse(t *testing.T) { // mark the remaining data as processed) testCases := map[string]struct { - // The segment structure to start with before calling maybeReadPending + // The segment structure to start with before calling + // handleReaderLoopResponse. 
segments diskQueueSegments response readerLoopResponse @@ -273,9 +465,10 @@ func TestHandleReaderLoopResponse(t *testing.T) { func TestMaybeReadPending(t *testing.T) { // maybeReadPending should: + // - If diskQueue.reading is true, do nothing and return immediately. // - If any unread data is available in a reading or writing segment, // send a readerLoopRequest for the full amount available in the - // first such segment. + // first such segment, and set diskQueue.reading to true. // - When creating a readerLoopRequest that includes the beginning of // a segment (startOffset == 0), set that segment's firstFrameID // to segments.nextReadFrameID (so ACKs based on frame ID can be linked @@ -287,6 +480,8 @@ func TestMaybeReadPending(t *testing.T) { testCases := map[string]struct { // The segment structure to start with before calling maybeReadPending segments diskQueueSegments + // The value of the diskQueue.reading flag before calling maybeReadPending + reading bool // The request we expect to see on the reader loop's request channel, // or nil if there should be none. expectedRequest *readerLoopRequest @@ -308,6 +503,15 @@ func TestMaybeReadPending(t *testing.T) { endOffset: 1000, }, }, + "do nothing if reading flag is set": { + segments: diskQueueSegments{ + reading: []*queueSegment{ + {id: 1, endOffset: 1000}, + }, + }, + reading: true, + expectedRequest: nil, + }, "read the end of a segment": { segments: diskQueueSegments{ reading: []*queueSegment{ @@ -402,6 +606,7 @@ func TestMaybeReadPending(t *testing.T) { readerLoop: &readerLoop{ requestChan: make(chan readerLoopRequest, 1), }, + reading: test.reading, } firstFrameID := test.segments.nextReadFrameID dq.maybeReadPending() @@ -421,6 +626,10 @@ func TestMaybeReadPending(t *testing.T) { t.Errorf( "%s: maybeReadPending should update firstFrameID", description) } + if !dq.reading { + t.Errorf( + "%s: maybeReadPending should set the reading flag", description) + } default: if test.expectedRequest != nil { t.Errorf("%s: expected read request %v, got none", @@ -446,10 +655,322 @@ func TestMaybeReadPending(t *testing.T) { } } +func TestMaybeWritePending(t *testing.T) { + // maybeWritePending should: + // - If diskQueue.writing is true, do nothing and return immediately. + // - Otherwise, if diskQueue.pendingFrames is nonempty: + // * send its contents as a writer loop request + // * set diskQueue.writeRequestSize to the total size of the + // request's frames + // * reset diskQueue.pendingFrames to nil + // * set diskQueue.writing to true. + dq := &diskQueue{ + settings: DefaultSettings(), + writerLoop: &writerLoop{ + requestChan: make(chan writerLoopRequest, 1), + }, + } + + // First call: pendingFrames is empty, should do nothing. + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request on empty pendingFrames, got %v", request) + default: + if dq.writing { + t.Errorf( + "maybeWritePending shouldn't set writing flag without a request") + } + } + + // Set up some frame data for the remaining calls. + pendingFrames := []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + {frame: makeWriteFrameWithSize(200)}} + // The size on disk should be the summed buffer lengths plus + // frameMetadataSize times the number of frames + expectedSize := uint64(300) + + // Second call: writing is true, should do nothing. 
+ dq.pendingFrames = pendingFrames + dq.writing = true + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + t.Errorf("expected no request with writing flag set, got %v", request) + default: + } + + // Third call: writing is false, should send a request with pendingFrames. + dq.writing = false + dq.maybeWritePending() + select { + case request := <-dq.writerLoop.requestChan: + // We are extra strict, because we can afford to be: the request should + // contain not just the same elements, but the exact same array (slice) + // as the previous value of pendingFrames. + if len(request.frames) != len(pendingFrames) || + &request.frames[0] != &pendingFrames[0] { + t.Errorf( + "expected request containing pendingFrames, got a different array") + } + if dq.writeRequestSize != expectedSize { + t.Errorf("expected writeRequestSize to equal %v, got %v", + expectedSize, dq.writeRequestSize) + } + if len(dq.pendingFrames) != 0 { + t.Errorf("pendingFrames should be reset after a write request") + } + if !dq.writing { + t.Errorf("the writing flag should be set after a write request") + } + default: + } +} + +func TestMaybeUnblockProducers(t *testing.T) { + // maybeUnblockProducers should: + // - As long as diskQueue.blockedProducers is nonempty and the queue has + // capacity to add its first element (see TestCanAcceptFrameOfSize): + // * Add the request's frame to diskQueue.pendingFrames (see + // enqueueWriteFrame) + // * Report success (true) to the producer's response channel + // * Remove the request from blockedProducers + // When complete, either blockedProducers should be empty or its first + // element should be too big to add to the queue. + + settings := DefaultSettings() + settings.MaxBufferSize = 1000 + responseChans := []chan bool{ + make(chan bool, 1), make(chan bool, 1), make(chan bool, 1)} + dq := &diskQueue{ + settings: settings, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + }, + blockedProducers: []producerWriteRequest{ + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[0], + }, + { + frame: makeWriteFrameWithSize(200), + responseChan: responseChans[1], + }, + { + frame: makeWriteFrameWithSize(501), + responseChan: responseChans[2], + }, + }, + } + + // First call: we expect two producers to be unblocked, because the third + // one would push us one byte above the 1000 byte limit. + dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 2 || len(dq.blockedProducers) != 1 { + t.Fatalf("Expected 2 pending frames and 1 blocked producer, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + select { + case response := <-responseChans[i]: + if i < 2 && !response { + t.Errorf("Expected success response for producer %v, got failure", i) + } else if i == 2 { + t.Fatalf("Expected no response for producer 2, got %v", response) + } + default: + if i < 2 { + t.Errorf("Expected success response for producer %v, got none", i) + } + } + } + + dq.blockedProducers[0].frame = makeWriteFrameWithSize(500) + // Second call: with the blocked request one byte smaller, it should fit + // into the queue, and be added with the other pending frames. 
+ dq.maybeUnblockProducers() + if len(dq.pendingFrames) != 3 || len(dq.blockedProducers) != 0 { + t.Fatalf("Expected 3 pending frames and 0 blocked producers, got %v and %v", + len(dq.pendingFrames), len(dq.blockedProducers)) + } + for i := 0; i < 3; i++ { + // This time the first two response channels should get nothing and the + // third should get success. + select { + case response := <-responseChans[i]: + if i < 2 { + t.Errorf("Expected no response for producer %v, got %v", i, response) + } else if !response { + t.Errorf("Expected success response for producer 2, got failure") + } + default: + if i == 2 { + t.Errorf("Expected success response for producer 2, got none") + } + } + } +} + +func TestCanAcceptFrameOfSize(t *testing.T) { + // canAcceptFrameOfSize decides whether the queue has enough free capacity + // to accept an incoming frame of the given size. It should: + // - If the length of pendingFrames is >= settings.WriteAheadLimit, + // return false. + // - If the queue size is unbounded (MaxBufferSize == 0), return true. + // - Otherwise, return true iff the total size of the queue plus the new + // frame is <= settings.MaxBufferSize. + // The size of the queue is calculated as the summed size of: + // * All segments listed in diskQueue.segments (writing, reading, acking, + // acked) + // * All frames in diskQueue.pendingFrames (which have been accepted but + // not yet written) + // * If a write request is outstanding (diskQueue.writing == true), + // diskQueue.writeRequestSize, which is the size of the data that is + // being written by writerLoop but hasn't yet been completed. + // All test cases are run with WriteAheadLimit = 2. + + testCases := map[string]struct { + // The value of settings.MaxBufferSize in the test queue. + maxBufferSize uint64 + // The value of the segments field in the test queue. + segments diskQueueSegments + // The value of pendingFrames in the test queue. + pendingFrames []segmentedFrame + // The value of writeRequestSize (the size of the most recent write + // request) in the test queue. + writeRequestSize uint64 + // The value of the writing flag in the test queue (writeRequestSize is + // included in the queue size calculation only if there is an active + // writing request). + writing bool + + // If expectedOutcomes[v] = b then canAcceptFrameOfSize(v) should return b. + expectedOutcomes map[uint64]bool + }{ + "always reject when at the write ahead limit": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(10)}, + {frame: makeWriteFrameWithSize(10)}, + }, + expectedOutcomes: map[uint64]bool{10: false}, + }, + "always accept when queue size is unbounded": { + maxBufferSize: 0, + expectedOutcomes: map[uint64]bool{ + 1: true, 1000: true, 1000000: true, 1000000000: true, + }, + }, + // The remaining cases are all below the write ahead limit and have + // bounded buffer size, we are just testing that the various + // source values are all accounted for. 
+ "pendingFrames counts against buffer capacity": { + maxBufferSize: 1000, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(500)}, + }, + // There should be exactly 500 bytes capacity left + expectedOutcomes: map[uint64]bool{ + 500: true, 501: false, + }, + }, + "diskQueue.segments counts against buffer capacity": { + maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + // Four segments of size 100, should be exactly 600 bytes left + expectedOutcomes: map[uint64]bool{ + 600: true, 601: false, + }, + }, + "writeRequestSize counts against buffer capacity when writing=true": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: true, + expectedOutcomes: map[uint64]bool{ + 400: true, 401: false, + }, + }, + "writeRequestSize doesn't count against buffer capacity when writing=false": { + maxBufferSize: 1000, + writeRequestSize: 600, + writing: false, + expectedOutcomes: map[uint64]bool{ + 1000: true, 1001: false, + }, + }, + "buffer capacity includes the sum of all sources": { + // include all of them together. + maxBufferSize: 1000, + segments: diskQueueSegments{ + writing: []*queueSegment{segmentWithSize(100)}, + reading: []*queueSegment{segmentWithSize(100)}, + acking: []*queueSegment{segmentWithSize(100)}, + acked: []*queueSegment{segmentWithSize(100)}, + }, + pendingFrames: []segmentedFrame{ + {frame: makeWriteFrameWithSize(100)}, + }, + writeRequestSize: 200, + writing: true, + expectedOutcomes: map[uint64]bool{ + 300: true, 301: false, + }, + }, + } + + for description, test := range testCases { + settings := DefaultSettings() + settings.WriteAheadLimit = 2 + settings.MaxBufferSize = test.maxBufferSize + dq := &diskQueue{ + settings: settings, + segments: test.segments, + pendingFrames: test.pendingFrames, + writeRequestSize: test.writeRequestSize, + writing: test.writing, + } + for size, expected := range test.expectedOutcomes { + result := dq.canAcceptFrameOfSize(size) + if result != expected { + t.Errorf("%v: expected canAcceptFrameOfSize(%v) = %v, got %v", + description, size, expected, result) + } + } + } +} + +func boolRef(b bool) *bool { + return &b +} + func segmentIDRef(id segmentID) *segmentID { return &id } +// Convenience helper that creates a frame that will have the given size on +// disk after accounting for header / footer size. +func makeWriteFrameWithSize(size int) *writeFrame { + if size <= frameMetadataSize { + // Frames must have a nonempty data region. + return nil + } + return &writeFrame{serialized: make([]byte, size-frameMetadataSize)} +} + +func segmentWithSize(size int) *queueSegment { + if size < segmentHeaderSize { + // Can't have a segment smaller than the segment header + return nil + } + return &queueSegment{endOffset: segmentOffset(size - segmentHeaderSize)} +} + func equalReaderLoopRequests( r0 readerLoopRequest, r1 readerLoopRequest, ) bool { diff --git a/libbeat/publisher/queue/diskqueue/queue.go b/libbeat/publisher/queue/diskqueue/queue.go index 5f756996e5f..1819ced21d5 100644 --- a/libbeat/publisher/queue/diskqueue/queue.go +++ b/libbeat/publisher/queue/diskqueue/queue.go @@ -55,6 +55,11 @@ type diskQueue struct { // otherwise. writing bool + // If writing is true, then writeRequestSize equals the number of bytes it + // contained. 
Used to calculate how much free capacity the queue has left + // after all scheduled writes have been completed (see canAcceptFrameOfSize). + writeRequestSize uint64 + // reading is true if the reader loop is processing a request, false // otherwise. reading bool From 3842bee898759378781ec37e0d0697637fcff60b Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Thu, 22 Oct 2020 16:56:37 -0500 Subject: [PATCH 57/93] Incorporate librpm fix feedback (#22098) - re-order imports - fix capitalization in error string --- x-pack/auditbeat/module/system/package/rpm_linux.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index 6e5df7e0c6e..399c121f878 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -7,6 +7,7 @@ package pkg import ( + "debug/elf" "errors" "fmt" "runtime" @@ -14,8 +15,6 @@ import ( "time" "unsafe" - "debug/elf" - "github.com/coreos/pkg/dlopen" ) @@ -257,7 +256,7 @@ func openLibrpm() (*librpm, error) { librpm.handle, err = dlopen.GetHandle(librpmNames) if err != nil { - return nil, fmt.Errorf("Couldn't open %v", librpmNames) + return nil, fmt.Errorf("couldn't open %v: %v", librpmNames, err) } librpm.rpmtsCreate, err = librpm.handle.GetSymbolPointer("rpmtsCreate") From 14326dc5f40f67868db1c7ba1a76200f5f2791af Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:34:14 -0700 Subject: [PATCH 58/93] Edit 6.8.13 release notes (#22120) --- CHANGELOG.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 349eb49edb3..df4e85892e3 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -2607,7 +2607,7 @@ https://github.com/elastic/beats/compare/v6.8.12...v6.8.13[View commits] *Filebeat* -- Add container image in Kubernetes metadata {pull}13356[13356] {issue}12688[12688] +- Add container image in Kubernetes metadata. {pull}13356[13356] {issue}12688[12688] [[release-notes-6.8.12]] === Beats version 6.8.12 From e74e886884ffbc1ca5c59ef636bfaed41792cbe7 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:36:05 -0700 Subject: [PATCH 59/93] Edit 7.9.3 changelog (#22117) --- CHANGELOG.asciidoc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index df4e85892e3..61353d3afdb 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -11,20 +11,21 @@ https://github.com/elastic/beats/compare/v7.9.2...v7.9.3[View commits] *Affecting all Beats* -- The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] +- The `o365audit` input and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21258[21258] *Auditbeat* -- system/socket: Fixed a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] +- system/socket: Fix a crash due to concurrent map read and write. {issue}21192[21192] {pull}21690[21690] *Filebeat* - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] + *Metricbeat* -- Fix remote_write flaky test. 
{pull}21173[21173] -- Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] -- [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] +- Fix `remote_write` flaky test. {pull}21173[21173] +- Fix panic in Kubernetes autodiscovery caused by storing stateless keystores. {issue}21843[21843] {pull}21880[21880] +- Remove redundant dockersock volume mount to avoid problems on Kubernetes deployments that do not use docker as the container runtime. {pull}22009[22009] [[release-notes-7.9.2]] From eb695ef4312a5dffaa708f1591ebaf5b7800d9ea Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 22 Oct 2020 20:37:17 -0700 Subject: [PATCH 60/93] Add fleet settings image (#22065) --- x-pack/elastic-agent/docs/run-elastic-agent.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc index 34bb2481f7f..2314f7652f4 100644 --- a/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc +++ b/x-pack/elastic-agent/docs/run-elastic-agent.asciidoc @@ -27,7 +27,7 @@ For self-managed installations, set the URLs for {es} and {kib}, including the http ports, then save your changes. + [role="screenshot"] -//image::images/kibana-fleet-settings.png[{fleet} settings] +image::images/kibana-fleet-settings.png[{fleet} settings] . Select **Agents**, then click **Add agent** to get an enrollment token. See <> for detailed steps. From 155dfda99aeb7a74383aa9023d4f350c4d5da668 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Fri, 23 Oct 2020 09:12:12 +0200 Subject: [PATCH 61/93] Change x509 mappings from file. to tls.server. (#22097) --- .../module/suricata/eve/ingest/pipeline.yml | 32 ++++++------ .../eve/test/eve-alerts.log-expected.json | 52 +++++++++---------- .../eve/test/eve-small.log-expected.json | 24 ++++----- 3 files changed, 54 insertions(+), 54 deletions(-) diff --git a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml index 01ed5accbe6..e132a8acdde 100644 --- a/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml +++ b/x-pack/filebeat/module/suricata/eve/ingest/pipeline.yml @@ -247,27 +247,27 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.C - target_field: file.x509.issuer.country + target_field: tls.server.x509.issuer.country ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.CN - target_field: file.x509.issuer.common_name + target_field: tls.server.x509.issuer.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.L - target_field: file.x509.issuer.locality + target_field: tls.server.x509.issuer.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.O - target_field: file.x509.issuer.organization + target_field: tls.server.x509.issuer.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.OU - target_field: file.x509.issuer.organizational_unit + target_field: tls.server.x509.issuer.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_issuerdn.ST - target_field: file.x509.issuer.state_or_province + target_field: tls.server.x509.issuer.state_or_province ignore_missing: true - gsub: field: suricata.eve.tls.subject @@ -282,34 +282,34 @@ processors: ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.C - target_field: file.x509.subject.country + target_field: tls.server.x509.subject.country ignore_missing: true - 
rename: field: suricata.eve.tls.kv_subject.CN - target_field: file.x509.subject.common_name + target_field: tls.server.x509.subject.common_name ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.L - target_field: file.x509.subject.locality + target_field: tls.server.x509.subject.locality ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.O - target_field: file.x509.subject.organization + target_field: tls.server.x509.subject.organization ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.OU - target_field: file.x509.subject.organizational_unit + target_field: tls.server.x509.subject.organizational_unit ignore_missing: true - rename: field: suricata.eve.tls.kv_subject.ST - target_field: file.x509.subject.state_or_province + target_field: tls.server.x509.subject.state_or_province ignore_missing: true - set: - field: file.x509.serial_number + field: tls.server.x509.serial_number value: '{{suricata.eve.tls.serial}}' ignore_empty_value: true - gsub: - field: file.x509.serial_number + field: tls.server.x509.serial_number pattern: ':' replacement: '' ignore_missing: true @@ -326,11 +326,11 @@ processors: - ISO8601 if: ctx.suricata?.eve?.tls?.notbefore != null - set: - field: file.x509.not_after + field: tls.server.x509.not_after value: '{{tls.server.not_after}}' ignore_empty_value: true - set: - field: file.x509.not_before + field: tls.server.x509.not_before value: '{{tls.server.not_before}}' ignore_empty_value: true - append: diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json index a63e2fd592a..ecccab3a10f 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-alerts.log-expected.json @@ -1633,17 +1633,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Google Internet Authority G2", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Google Inc", - "file.x509.not_after": "2024-07-16T14:52:35.000Z", - "file.x509.not_before": "2019-07-17T14:52:35.000Z", - "file.x509.serial_number": "001122334455667788", - "file.x509.subject.common_name": "*.google.com", - "file.x509.subject.country": "US", - "file.x509.subject.locality": "Mountain View", - "file.x509.subject.organization": "Google Inc", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 16546, @@ -1687,6 +1676,17 @@ "tls.server.not_after": "2024-07-16T14:52:35.000Z", "tls.server.not_before": "2019-07-17T14:52:35.000Z", "tls.server.subject": "C=US, ST=California, L=Mountain View, O=Google Inc, CN=*.google.com", + "tls.server.x509.issuer.common_name": "Google Internet Authority G2", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Google Inc", + "tls.server.x509.not_after": "2024-07-16T14:52:35.000Z", + "tls.server.x509.not_before": "2019-07-17T14:52:35.000Z", + "tls.server.x509.serial_number": "001122334455667788", + "tls.server.x509.subject.common_name": "*.google.com", + "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.locality": "Mountain View", + "tls.server.x509.subject.organization": "Google Inc", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, @@ -1711,21 +1711,6 @@ "event.type": [ "allowed" ], - "file.x509.issuer.common_name": "Unknown", - "file.x509.issuer.country": "Unknown", - 
"file.x509.issuer.locality": "Unknown", - "file.x509.issuer.organization": "Unknown", - "file.x509.issuer.organizational_unit": "Unknown", - "file.x509.issuer.state_or_province": "Unknown", - "file.x509.not_after": "2026-06-25T17:36:29.000Z", - "file.x509.not_before": "2016-06-27T17:36:29.000Z", - "file.x509.serial_number": "72A92C51", - "file.x509.subject.common_name": "Unknown", - "file.x509.subject.country": "Unknown", - "file.x509.subject.locality": "Unknown", - "file.x509.subject.organization": "Unknown", - "file.x509.subject.organizational_unit": "Unknown", - "file.x509.subject.state_or_province": "Unknown", "fileset.name": "eve", "input.type": "log", "log.offset": 17541, @@ -1781,6 +1766,21 @@ "tls.server.not_after": "2026-06-25T17:36:29.000Z", "tls.server.not_before": "2016-06-27T17:36:29.000Z", "tls.server.subject": "C=Unknown, ST=Unknown, L=Unknown, O=Unknown, OU=Unknown, CN=Unknown", + "tls.server.x509.issuer.common_name": "Unknown", + "tls.server.x509.issuer.country": "Unknown", + "tls.server.x509.issuer.locality": "Unknown", + "tls.server.x509.issuer.organization": "Unknown", + "tls.server.x509.issuer.organizational_unit": "Unknown", + "tls.server.x509.issuer.state_or_province": "Unknown", + "tls.server.x509.not_after": "2026-06-25T17:36:29.000Z", + "tls.server.x509.not_before": "2016-06-27T17:36:29.000Z", + "tls.server.x509.serial_number": "72A92C51", + "tls.server.x509.subject.common_name": "Unknown", + "tls.server.x509.subject.country": "Unknown", + "tls.server.x509.subject.locality": "Unknown", + "tls.server.x509.subject.organization": "Unknown", + "tls.server.x509.subject.organizational_unit": "Unknown", + "tls.server.x509.subject.state_or_province": "Unknown", "tls.version": "1.2", "tls.version_protocol": "tls" } diff --git a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json index 4851f2db826..2db09a8ee38 100644 --- a/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json +++ b/x-pack/filebeat/module/suricata/eve/test/eve-small.log-expected.json @@ -430,18 +430,6 @@ "event.type": [ "protocol" ], - "file.x509.issuer.common_name": "Apple IST CA 2 - G1", - "file.x509.issuer.country": "US", - "file.x509.issuer.organization": "Apple Inc.", - "file.x509.issuer.organizational_unit": "Certification Authority", - "file.x509.not_after": "2019-03-29T17:54:31.000Z", - "file.x509.not_before": "2017-02-27T17:54:31.000Z", - "file.x509.serial_number": "5C9CE1097887F807", - "file.x509.subject.common_name": "*.icloud.com", - "file.x509.subject.country": "US", - "file.x509.subject.organization": "Apple Inc.", - "file.x509.subject.organizational_unit": "management:idms.group.506364", - "file.x509.subject.state_or_province": "California", "fileset.name": "eve", "input.type": "log", "log.offset": 4683, @@ -479,6 +467,18 @@ "tls.server.not_after": "2019-03-29T17:54:31.000Z", "tls.server.not_before": "2017-02-27T17:54:31.000Z", "tls.server.subject": "CN=*.icloud.com, OU=management:idms.group.506364, O=Apple Inc., ST=California, C=US", + "tls.server.x509.issuer.common_name": "Apple IST CA 2 - G1", + "tls.server.x509.issuer.country": "US", + "tls.server.x509.issuer.organization": "Apple Inc.", + "tls.server.x509.issuer.organizational_unit": "Certification Authority", + "tls.server.x509.not_after": "2019-03-29T17:54:31.000Z", + "tls.server.x509.not_before": "2017-02-27T17:54:31.000Z", + "tls.server.x509.serial_number": "5C9CE1097887F807", + "tls.server.x509.subject.common_name": "*.icloud.com", 
+ "tls.server.x509.subject.country": "US", + "tls.server.x509.subject.organization": "Apple Inc.", + "tls.server.x509.subject.organizational_unit": "management:idms.group.506364", + "tls.server.x509.subject.state_or_province": "California", "tls.version": "1.2", "tls.version_protocol": "tls" }, From 2f7b15b7da8f41ef1534d5c5a3c1ac80d9ffbd40 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Mon, 26 Oct 2020 10:15:05 +0100 Subject: [PATCH 62/93] Use default config when creating the input (#22126) --- x-pack/filebeat/input/httpjson/input_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/filebeat/input/httpjson/input_manager.go b/x-pack/filebeat/input/httpjson/input_manager.go index 21f5066dc05..8d7e6070786 100644 --- a/x-pack/filebeat/input/httpjson/input_manager.go +++ b/x-pack/filebeat/input/httpjson/input_manager.go @@ -36,7 +36,7 @@ func (m inputManager) Init(grp unison.Group, mode v2.Mode) error { // Create creates a cursor input manager if the config has a date cursor set up, // otherwise it creates a stateless input manager. func (m inputManager) Create(cfg *common.Config) (v2.Input, error) { - var config config + config := newDefaultConfig() if err := cfg.Unpack(&config); err != nil { return nil, err } From a56193354a5a24b003ac33243916d42f7e39274f Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 11:22:26 +0000 Subject: [PATCH 63/93] [CI] support windows-10 (#19804) --- Jenkinsfile.yml | 2 +- auditbeat/Jenkinsfile.yml | 11 +++++++++++ filebeat/Jenkinsfile.yml | 13 +++++++++++++ heartbeat/Jenkinsfile.yml | 11 +++++++++++ metricbeat/Jenkinsfile.yml | 11 +++++++++++ packetbeat/Jenkinsfile.yml | 11 +++++++++++ winlogbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/auditbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/elastic-agent/Jenkinsfile.yml | 11 +++++++++++ x-pack/filebeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/functionbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/metricbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/packetbeat/Jenkinsfile.yml | 13 ++++++++++++- x-pack/winlogbeat/Jenkinsfile.yml | 11 +++++++++++ 14 files changed, 148 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index f7b21e1cbdf..cc35232d6d0 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -8,7 +8,7 @@ projects: - "libbeat" - "metricbeat" - "packetbeat" - - "winlogbeat" + ##- "winlogbeat" See https://github.com/elastic/beats/issues/22046 - "x-pack/auditbeat" - "x-pack/dockerlogbeat" - "x-pack/elastic-agent" diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index b3f20af2d37..c68b5689f48 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -69,3 +69,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index 09dbe948c72..d8ea11c24a5 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -57,3 +57,16 @@ stages: - "windows-2016" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test filebeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml index 1d41dbe581e..032ec411892 100644 --- a/heartbeat/Jenkinsfile.yml +++ b/heartbeat/Jenkinsfile.yml @@ -67,4 +67,15 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test heartbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml index bdd45090837..e6c4ffcef0e 100644 --- a/metricbeat/Jenkinsfile.yml +++ b/metricbeat/Jenkinsfile.yml @@ -62,3 +62,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test metricbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml index adf6471b82a..ef373bb5f15 100644 --- a/packetbeat/Jenkinsfile.yml +++ b/packetbeat/Jenkinsfile.yml @@ -67,3 +67,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test packetbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index 94b36b0e647..3ec79093ca4 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -41,3 +41,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test winlogbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml index 1294c4681b4..f4e55ea6372 100644 --- a/x-pack/auditbeat/Jenkinsfile.yml +++ b/x-pack/auditbeat/Jenkinsfile.yml @@ -52,7 +52,7 @@ stages: - "windows-2016" when: ## Override the top-level when. comments: - - "/test auditbeat for windows-2016" + - "/test x-pack/auditbeat for windows-2016" labels: - "windows-2016" branches: true ## for all the branches @@ -68,3 +68,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test x-pack/auditbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index bf1bfed3ddd..d324e3381af 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -67,3 +67,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/elastic-agent for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index 5bd2bcd40cf..d28520b7c32 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -68,3 +68,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/filebeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml index ecb2bd14e0e..117d92e3179 100644 --- a/x-pack/functionbeat/Jenkinsfile.yml +++ b/x-pack/functionbeat/Jenkinsfile.yml @@ -65,3 +65,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/functionbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 60a593c488d..8506eb12e69 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -57,3 +57,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/metricbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index e3fa9ad0324..a3c11636dc6 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -24,7 +24,7 @@ stages: - "windows-2016" when: ## Override the top-level when. comments: - - "/test x-pack/winlogbeat for windows-2016" + - "/test x-pack/packetbeat for windows-2016" labels: - "windows-2016" branches: true ## for all the branches @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. 
+ comments: + - "/test x-pack/packetbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/winlogbeat/Jenkinsfile.yml b/x-pack/winlogbeat/Jenkinsfile.yml index 371f0aa6f48..45dfcad9d45 100644 --- a/x-pack/winlogbeat/Jenkinsfile.yml +++ b/x-pack/winlogbeat/Jenkinsfile.yml @@ -40,3 +40,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags + windows-10: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-10" + when: ## Override the top-level when. + comments: + - "/test x-pack/winlogbeat for windows-10" + labels: + - "windows-10" + branches: true ## for all the branches + tags: true ## for all the tags From 97d7324690326e2b3eb1e6014bee4a191daf732c Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 13:59:08 +0000 Subject: [PATCH 64/93] [CI] Enable winlogbeat (#22142) --- Jenkinsfile.yml | 2 +- winlogbeat/Jenkinsfile.yml | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index cc35232d6d0..f7b21e1cbdf 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -8,7 +8,7 @@ projects: - "libbeat" - "metricbeat" - "packetbeat" - ##- "winlogbeat" See https://github.com/elastic/beats/issues/22046 + - "winlogbeat" - "x-pack/auditbeat" - "x-pack/dockerlogbeat" - "x-pack/elastic-agent" diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index 3ec79093ca4..3b9c71bf0c3 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -41,14 +41,14 @@ stages: - "windows-2012" branches: true ## for all the branches tags: true ## for all the tags - windows-10: - mage: "mage build unitTest" - platforms: ## override default labels in this specific stage. - - "windows-10" - when: ## Override the top-level when. - comments: - - "/test winlogbeat for windows-10" - labels: - - "windows-10" - branches: true ## for all the branches - tags: true ## for all the tags + # windows-10: See https://github.com/elastic/beats/issues/22046 + # mage: "mage build unitTest" + # platforms: ## override default labels in this specific stage. + # - "windows-10" + # when: ## Override the top-level when. 
+ # comments: + # - "/test winlogbeat for windows-10" + # labels: + # - "windows-10" + # branches: true ## for all the branches + # tags: true ## for all the tags From d36a5106da49ccce2ae1d4a5f53397e73ea8417c Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 26 Oct 2020 17:56:53 +0200 Subject: [PATCH 65/93] Remove old TODO on kubernetes node update (#22074) --- libbeat/autodiscover/providers/kubernetes/node.go | 1 - 1 file changed, 1 deletion(-) diff --git a/libbeat/autodiscover/providers/kubernetes/node.go b/libbeat/autodiscover/providers/kubernetes/node.go index a78622756cd..95e23b33d2a 100644 --- a/libbeat/autodiscover/providers/kubernetes/node.go +++ b/libbeat/autodiscover/providers/kubernetes/node.go @@ -105,7 +105,6 @@ func (n *node) OnUpdate(obj interface{}) { time.AfterFunc(n.config.CleanupTimeout, func() { n.emit(node, "stop") }) } else { n.logger.Debugf("Watcher Node update: %+v", obj) - // TODO: figure out how to avoid stop starting when node status is periodically being updated by kubelet n.emit(node, "stop") n.emit(node, "start") } From 13a195a16aec0468fb6de228fa3bb696c2d8aeef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Mon, 26 Oct 2020 17:27:05 +0100 Subject: [PATCH 66/93] Fix Google Cloud Function configuration file issues (#22156) ## What does this PR do? This PR adds a new function to to `cfgfile` to set the path to the configuration file of a Beat. This fixes the issue on GCP with Functionbeat. ## Why is it important? ATM Functionbeat cannot run on GCP. ## Related issues Closes #20864 --- CHANGELOG.next.asciidoc | 1 + libbeat/cfgfile/cfgfile.go | 4 ++++ x-pack/functionbeat/provider/gcp/pubsub/pubsub.go | 1 + x-pack/functionbeat/provider/gcp/storage/storage.go | 1 + x-pack/functionbeat/scripts/mage/update.go | 3 +++ 5 files changed, 10 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1bf2cc8f762..9e79ccef1dd 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -400,6 +400,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Do not need Google credentials if not required for the operation. {issue}17329[17329] {pull}21072[21072] - Fix dependency issues of GCP functions. {issue}20830[20830] {pull}21070[21070] - Fix catchall bucket config errors by adding more validation. {issue}17572[16282] {pull}20887[16287] +- Fix Google Cloud Function configuration issue. {issue}20864[20864] {pull}22156[22156] ==== Added diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index 767cbd34bc5..ff394906931 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -205,6 +205,10 @@ func LoadList(file string) ([]*common.Config, error) { return c, nil } +func SetConfigPath(path string) { + *configPath = path +} + // GetPathConfig returns ${path.config}. If ${path.config} is not set, ${path.home} is returned. 
func GetPathConfig() string { if *configPath != "" { diff --git a/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go b/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go index 44f77695e94..813a0df9cc0 100644 --- a/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go +++ b/x-pack/functionbeat/provider/gcp/pubsub/pubsub.go @@ -29,6 +29,7 @@ func RunPubSub(ctx context.Context, m gpubsub.Message) error { ConfigOverrides: config.FunctionOverrides, } + cfgfile.SetConfigPath("/srv/src/pubsub") cfgfile.ChangeDefaultCfgfileFlag(settings.Name) return instance.Run(settings, initFunctionbeat(ctx, m)) diff --git a/x-pack/functionbeat/provider/gcp/storage/storage.go b/x-pack/functionbeat/provider/gcp/storage/storage.go index c9d1660d67c..2de829392d2 100644 --- a/x-pack/functionbeat/provider/gcp/storage/storage.go +++ b/x-pack/functionbeat/provider/gcp/storage/storage.go @@ -27,6 +27,7 @@ func RunCloudStorage(ctx context.Context, e gcp.StorageEvent) error { ConfigOverrides: config.FunctionOverrides, } + cfgfile.SetConfigPath("/srv/src/storage") cfgfile.ChangeDefaultCfgfileFlag(settings.Name) return instance.Run(settings, initFunctionbeat(ctx, e)) diff --git a/x-pack/functionbeat/scripts/mage/update.go b/x-pack/functionbeat/scripts/mage/update.go index 59b56cb6bed..99f1b9a3ce3 100644 --- a/x-pack/functionbeat/scripts/mage/update.go +++ b/x-pack/functionbeat/scripts/mage/update.go @@ -74,6 +74,9 @@ func (Update) VendorBeats() error { Exclude: []string{ ".*_test.go$", ".*.yml", + // XXX GCP function metadata lib must be removed to avoid build failures + // GH issue: https://github.com/googleapis/google-cloud-go/issues/1947 + ".*cloud.google.com/go.*/functions/metadata.*", }, } err = cp.Execute() From 5469c46c82da8472a22dce446a48ef2d1827c0db Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Mon, 26 Oct 2020 12:21:07 -0500 Subject: [PATCH 67/93] Fix zeek connection pipeline (#22151) - connection state for rejected is 'REJ' Closes #22149 --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9e79ccef1dd..ae48f268977 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -287,6 +287,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix checkpoint module when logs contain time field. {pull}20567[20567] - Add field limit check for AWS Cloudtrail flattened fields. {pull}21388[21388] {issue}21382[21382] - Fix syslog RFC 5424 parsing in the CheckPoint module. {pull}21854[21854] +- Fix incorrect connection state mapping in zeek connection pipeline. {pull}22151[22151] {issue}22149[22149] *Heartbeat* diff --git a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml index 9cd654edd51..c25c9cee6e5 100644 --- a/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml +++ b/x-pack/filebeat/module/zeek/connection/ingest/pipeline.yml @@ -115,7 +115,7 @@ processors: - connection - start - end - REG: + REJ: conn_str: "Connection attempt rejected." 
types: - connection From 5501ce848afca10590696ba1f4bb7426660ebec8 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 26 Oct 2020 17:58:22 +0000 Subject: [PATCH 68/93] [CI] set env variable for the params (#22143) --- Jenkinsfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 4099e820f97..95f270e9e64 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -12,6 +12,7 @@ pipeline { agent { label 'ubuntu-18 && immutable' } environment { AWS_ACCOUNT_SECRET = 'secret/observability-team/ci/elastic-observability-aws-account-auth' + AWS_REGION = "${params.awsRegion}" REPO = 'beats' BASE_DIR = "src/github.com/elastic/${env.REPO}" DOCKERELASTIC_SECRET = 'secret/observability-team/ci/docker-registry/prod' @@ -431,7 +432,7 @@ def withCloudTestEnv(Closure body) { error("${AWS_ACCOUNT_SECRET} doesn't contain 'secret_key'") } maskedVars.addAll([ - [var: "AWS_REGION", password: params.awsRegion], + [var: "AWS_REGION", password: "${env.AWS_REGION}"], [var: "AWS_ACCESS_KEY_ID", password: aws.access_key], [var: "AWS_SECRET_ACCESS_KEY", password: aws.secret_key], ]) From ad1722c88152f3b07fa712e66d12fa7aaa67624c Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 27 Oct 2020 11:30:30 +0100 Subject: [PATCH 69/93] [Ingest Manager] Skip flaky gateway tests #22177 [Ingest Manager] Skip flaky gateway tests #22177 --- x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go index cfcd1f46994..0d079cdf858 100644 --- a/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go +++ b/x-pack/elastic-agent/pkg/agent/application/fleet_gateway_test.go @@ -162,6 +162,7 @@ func wrapStrToResp(code int, body string) *http.Response { } func TestFleetGateway(t *testing.T) { + t.Skip("Flaky when CI is slower") agentInfo := &testAgentInfo{} settings := &fleetGatewaySettings{ From 367714988baa73e1cc76874556baeb8d96e8df8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 27 Oct 2020 11:41:50 +0100 Subject: [PATCH 70/93] Add documentation of filestream input (#21615) --- filebeat/docs/filebeat-options.asciidoc | 2 + .../input-filestream-file-options.asciidoc | 394 ++++++++++++++++++ .../input-filestream-reader-options.asciidoc | 143 +++++++ .../docs/inputs/input-filestream.asciidoc | 165 ++++++++ 4 files changed, 704 insertions(+) create mode 100644 filebeat/docs/inputs/input-filestream-file-options.asciidoc create mode 100644 filebeat/docs/inputs/input-filestream-reader-options.asciidoc create mode 100644 filebeat/docs/inputs/input-filestream.asciidoc diff --git a/filebeat/docs/filebeat-options.asciidoc b/filebeat/docs/filebeat-options.asciidoc index 8bbd06ec7f8..4cfa0961a20 100644 --- a/filebeat/docs/filebeat-options.asciidoc +++ b/filebeat/docs/filebeat-options.asciidoc @@ -94,6 +94,8 @@ include::inputs/input-container.asciidoc[] include::inputs/input-docker.asciidoc[] +include::inputs/input-filestream.asciidoc[] + include::../../x-pack/filebeat/docs/inputs/input-google-pubsub.asciidoc[] include::../../x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc[] diff --git a/filebeat/docs/inputs/input-filestream-file-options.asciidoc b/filebeat/docs/inputs/input-filestream-file-options.asciidoc new file mode 100644 index 00000000000..768960323f9 --- /dev/null +++ b/filebeat/docs/inputs/input-filestream-file-options.asciidoc @@ -0,0 +1,394 @@ 
+////////////////////////////////////////////////////////////////////////// +//// This content is shared by Filebeat inputs that use the input +//// to process files on disk (includes options for managing physical files) +//// If you add IDs to sections, make sure you use attributes to create +//// unique IDs for each input that includes this file. Use the format: +//// [id="{beatname_lc}-input-{type}-option-name"] +////////////////////////////////////////////////////////////////////////// + +[float] +[id="{beatname_lc}-input-{type}-exclude-files"] +=== Prospector options + +The prospector is running a file system watcher which looks for files specified +in the `paths` option. At the moment only simple file system scanning is +supported. + +==== Scanner options + +The scanner watches the configured paths. It scans the file system periodically +and returns the file system events to the Prospector. + +===== `prospector.scanner.exclude_files` + +A list of regular expressions to match the files that you want {beatname_uc} to +ignore. By default no files are excluded. + +The following example configures {beatname_uc} to ignore all the files that have +a `gz` extension: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: {type} + ... + prospector.scanner.exclude_files: ['\.gz$'] +---- + +See <> for a list of supported regexp patterns. + +===== `prospector.scanner.symlinks` + +The `symlinks` option allows {beatname_uc} to harvest symlinks in addition to +regular files. When harvesting symlinks, {beatname_uc} opens and reads the +original file even though it reports the path of the symlink. + +When you configure a symlink for harvesting, make sure the original path is +excluded. If a single input is configured to harvest both the symlink and +the original file, {beatname_uc} will detect the problem and only process the +first file it finds. However, if two different inputs are configured (one +to read the symlink and the other the original path), both paths will be +harvested, causing {beatname_uc} to send duplicate data and the inputs to +overwrite each other's state. + +The `symlinks` option can be useful if symlinks to the log files have additional +metadata in the file name, and you want to process the metadata in Logstash. +This is, for example, the case for Kubernetes log files. + +Because this option may lead to data loss, it is disabled by default. + + +[float] +[id="{beatname_lc}-input-{type}-scan-frequency"] +===== `prospector.scanner.check_interval` + +How often {beatname_uc} checks for new files in the paths that are specified +for harvesting. For example, if you specify a glob like `/var/log/*`, the +directory is scanned for files using the frequency specified by +`check_interval`. Specify 1s to scan the directory as frequently as possible +without causing {beatname_uc} to scan too frequently. We do not recommend to set +this value `<1s`. + +If you require log lines to be sent in near real time do not use a very low +`check_interval` but adjust `close.on_state_change.inactive` so the file handler +stays open and constantly polls your files. + +The default setting is 10s. + +[float] +[id="{beatname_lc}-input-{type}-ignore-older"] +===== `ignore_older` + +If this option is enabled, {beatname_uc} ignores any files that were modified +before the specified timespan. Configuring `ignore_older` can be especially +useful if you keep log files for a long time. 
For example, if you want to start
+{beatname_uc}, but only want to send the newest files and files from last week,
+you can configure this option.
+
+You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is 0,
+which disables the setting. Commenting out the config has the same effect as
+setting it to 0.
+
+IMPORTANT: You must set `ignore_older` to be greater than `close.on_state_change.inactive`.
+
+The files affected by this setting fall into two categories:
+
+* Files that were never harvested
+* Files that were harvested but weren't updated for longer than `ignore_older`
+
+For files which were never seen before, the offset state is set to the end of
+the file. If a state already exists, the offset is not changed. In case a file is
+updated again later, reading continues at the set offset position.
+
+The `ignore_older` setting relies on the modification time of the file to
+determine if a file is ignored. If the modification time of the file is not
+updated when lines are written to a file (which can happen on Windows), the
+`ignore_older` setting may cause {beatname_uc} to ignore files even though
+content was added at a later time.
+
+To remove the state of previously harvested files from the registry file, use
+the `clean_inactive` configuration option.
+
+Before a file can be ignored by {beatname_uc}, the file must be closed. To
+ensure a file is no longer being harvested when it is ignored, you must set
+`ignore_older` to a longer duration than `close.on_state_change.inactive`.
+
+If a file that's currently being harvested falls under `ignore_older`, the
+harvester will first finish reading the file and close it after
+`close.on_state_change.inactive` is reached. After that, the file will be ignored.
+
+[float]
+[id="{beatname_lc}-input-{type}-close-options"]
+===== `close.*`
+
+The `close.*` configuration options are used to close the harvester after
+certain criteria are met or after a certain time. Closing the harvester means closing the file handler.
+If a file is updated after the harvester is closed, the file will be picked up
+again after `prospector.scanner.check_interval` has elapsed. However, if the file
+is moved or deleted while the harvester is closed, {beatname_uc} will not be able
+to pick up the file again, and any data that the harvester hasn't read will be lost.
+
+The `close.on_state_change.*` settings are applied asynchronously, without
+waiting for a read from the file. This means that if {beatname_uc} is in a blocked
+state due to blocked output, a full queue or another issue, a file that is due to
+be closed is closed regardless.
+
+
+[float]
+[id="{beatname_lc}-input-{type}-close-inactive"]
+===== `close.on_state_change.inactive`
+
+When this option is enabled, {beatname_uc} closes the file handle if a file has
+not been harvested for the specified duration. The counter for the defined
+period starts when the last log line was read by the harvester. It is not based
+on the modification time of the file. If the closed file changes again, a new
+harvester is started and the latest changes will be picked up after
+`prospector.scanner.check_interval` has elapsed.
+
+We recommend that you set `close.on_state_change.inactive` to a value that is
+larger than the least frequent updates to your log files. For example, if your
+log files get updated every few seconds, you can safely set
+`close.on_state_change.inactive` to `1m`. If there are log files with very
+different update rates, you can use multiple configurations with different values.
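+
+For example, a configuration along the following lines could apply a short
+`close.on_state_change.inactive` to frequently updated logs and a longer one to
+rarely updated logs. The paths and durations shown here are purely illustrative;
+adjust them to your own files:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  paths:
+    - /var/log/fast/*.log
+  close.on_state_change.inactive: 2m
+- type: {type}
+  paths:
+    - /var/log/slow/*.log
+  close.on_state_change.inactive: 1h
+----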
+
+Setting `close.on_state_change.inactive` to a lower value means that file handles
+are closed sooner. However, this has the side effect that new log lines are not
+sent in near real time if the harvester is closed.
+
+The timestamp for closing a file does not depend on the modification time of the
+file. Instead, {beatname_uc} uses an internal timestamp that reflects when the
+file was last harvested. For example, if `close.on_state_change.inactive` is set
+to 5 minutes, the countdown for the 5 minutes starts after the harvester reads the
+last line of the file.
+
+You can use time strings like 2h (2 hours) and 5m (5 minutes). The default is
+5m.
+
+[float]
+[id="{beatname_lc}-input-{type}-close-renamed"]
+===== `close.on_state_change.renamed`
+
+WARNING: Only use this option if you understand that data loss is a potential
+side effect.
+
+When this option is enabled, {beatname_uc} closes the file handler when a file
+is renamed. This happens, for example, when rotating files. By default, the
+harvester stays open and keeps reading the file because the file handler does
+not depend on the file name. If the `close.on_state_change.renamed` option is
+enabled and the file is renamed or moved in such a way that it's no longer
+matched by the file patterns specified for the input, the file will not be picked
+up again. {beatname_uc} will not finish reading the file.
+
+Do not use this option when `path` based `file_identity` is configured. It does
+not make sense to enable the option, as Filebeat cannot detect renames using
+path names as unique identifiers.
+
+WINDOWS: If your Windows log rotation system shows errors because it can't
+rotate the files, you should enable this option.
+
+[float]
+[id="{beatname_lc}-input-{type}-close-removed"]
+===== `close.on_state_change.removed`
+
+When this option is enabled, {beatname_uc} closes the harvester when a file is
+removed. Normally a file should only be removed after it's inactive for the
+duration specified by `close.on_state_change.inactive`. However, if a file is
+removed early and you don't enable `close.on_state_change.removed`, {beatname_uc}
+keeps the file open to make sure the harvester has completed. If this setting
+results in files that are not completely read because they are removed from
+disk too early, disable this option.
+
+This option is enabled by default. If you disable this option, you must also
+disable `clean_removed`.
+
+WINDOWS: If your Windows log rotation system shows errors because it can't
+rotate files, make sure this option is enabled.
+
+[float]
+[id="{beatname_lc}-input-{type}-close-eof"]
+===== `close.reader.eof`
+
+WARNING: Only use this option if you understand that data loss is a potential
+side effect.
+
+When this option is enabled, {beatname_uc} closes a file as soon as the end of a
+file is reached. This is useful when your files are only written once and not
+updated from time to time. For example, this happens when you are writing every
+single log event to a new file. This option is disabled by default.
+
+[float]
+[id="{beatname_lc}-input-{type}-close-timeout"]
+===== `close.reader.timeout`
+
+WARNING: Only use this option if you understand that data loss is a potential
+side effect. Another side effect is that multiline events might not be
+completely sent before the timeout expires.
+
+When this option is enabled, {beatname_uc} gives every harvester a predefined
+lifetime.
Regardless of where the reader is in the file, reading will stop after +the `close.reader.after_interval` period has elapsed. This option can be useful for older log +files when you want to spend only a predefined amount of time on the files. +While `close.reader.after_interval` will close the file after the predefined timeout, if the +file is still being updated, {beatname_uc} will start a new harvester again per +the defined `scan_frequency`. And the close.reader.after_interval for this harvester will +start again with the countdown for the timeout. + +This option is particularly useful in case the output is blocked, which makes +{beatname_uc} keep open file handlers even for files that were deleted from the +disk. Setting `close.reader.after_interval` to `5m` ensures that the files are periodically +closed so they can be freed up by the operating system. + +If you set `close.reader.after_interval` to equal `ignore_older`, the file will not be picked +up if it's modified while the harvester is closed. This combination of settings +normally leads to data loss, and the complete file is not sent. + +When you use `close.reader.after_interval` for logs that contain multiline events, the +harvester might stop in the middle of a multiline event, which means that only +parts of the event will be sent. If the harvester is started again and the file +still exists, only the second part of the event will be sent. + +This option is set to 0 by default which means it is disabled. + + +[float] +[id="{beatname_lc}-input-{type}-clean-options"] +===== `clean_*` + +The `clean_*` options are used to clean up the state entries in the registry +file. These settings help to reduce the size of the registry file and can +prevent a potential <>. + +[float] +[id="{beatname_lc}-input-{type}-clean-inactive"] +===== `clean_inactive` + +WARNING: Only use this option if you understand that data loss is a potential +side effect. + +When this option is enabled, {beatname_uc} removes the state of a file after the +specified period of inactivity has elapsed. The state can only be removed if +the file is already ignored by {beatname_uc} (the file is older than +`ignore_older`). The `clean_inactive` setting must be greater than `ignore_older + +scan_frequency` to make sure that no states are removed while a file is still +being harvested. Otherwise, the setting could result in {beatname_uc} resending +the full content constantly because `clean_inactive` removes state for files +that are still detected by {beatname_uc}. If a file is updated or appears +again, the file is read from the beginning. + +The `clean_inactive` configuration option is useful to reduce the size of the +registry file, especially if a large amount of new files are generated every +day. + +This config option is also useful to prevent {beatname_uc} problems resulting +from inode reuse on Linux. For more information, see <>. + +NOTE: Every time a file is renamed, the file state is updated and the counter +for `clean_inactive` starts at 0 again. + +TIP: During testing, you might notice that the registry contains state entries +that should be removed based on the `clean_inactive` setting. This happens +because {beatname_uc} doesn't remove the entries until it opens the registry +again to read a different file. If you are testing the `clean_inactive` setting, +make sure {beatname_uc} is configured to read from more than one file, or the +file state will never be removed from the registry. 
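+
+As a rough sketch, a configuration such as the following keeps registry state
+only for files that were recently active. The paths and durations are purely
+illustrative; the only hard requirement is the rule described above
+(`clean_inactive` must be greater than `ignore_older` plus the scan interval):
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  paths:
+    - /var/log/app/*.log
+  ignore_older: 48h
+  clean_inactive: 72h
+----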
+
+[float]
+[id="{beatname_lc}-input-{type}-clean-removed"]
+===== `clean_removed`
+
+When this option is enabled, {beatname_uc} cleans files from the registry if
+they cannot be found on disk anymore under the last known name. This means that
+files which were renamed after the harvester finished will also be removed. This
+option is enabled by default.
+
+If a shared drive disappears for a short period and appears again, all files
+will be read again from the beginning because the states were removed from the
+registry file. In such cases, we recommend that you disable the `clean_removed`
+option.
+
+You must disable this option if you also disable `close.on_state_change.removed`.
+
+[float]
+===== `backoff.*`
+
+The backoff options specify how aggressively {beatname_uc} crawls open files for
+updates. You can use the default values in most cases.
+
+The `backoff.init` option defines how long {beatname_uc} waits before checking a file
+again after EOF is reached. The default is 1s, which means the file is checked
+every second if new lines were added. This enables near real-time crawling.
+Every time a new line appears in the file, the backoff value is reset to the
+initial value.
+
+[float]
+===== `backoff.init`
+
+The initial time {beatname_uc} waits before checking a file again after EOF is
+reached. The wait time grows with every unsuccessful check of the file, up to
+`backoff.max`, and is reset to `backoff.init` as soon as a new line is read. The
+default is 1s.
+
+[float]
+===== `backoff.max`
+
+The maximum time for {beatname_uc} to wait before checking a file again after
+EOF is reached. After having backed off multiple times from checking the file,
+the wait time will never exceed `backoff.max`. Because it takes a maximum of 10s
+to read a new line, specifying 10s for `backoff.max` means that, at the worst, a
+new line could be added to the log file if {beatname_uc} has backed off multiple
+times. The default is 10s.
+
+Requirement: Set `backoff.max` to be greater than or equal to `backoff.init` and
+less than or equal to `prospector.scanner.check_interval`
+(`backoff.init <= backoff.max <= prospector.scanner.check_interval`).
+If `backoff.max` needs to be higher, it is recommended to close the file handler
+instead and let {beatname_uc} pick up the file again.
+
+[float]
+===== `file_identity`
+
+Different `file_identity` methods can be configured to suit the
+environment where you are collecting log messages.
+
+
+*`native`*:: The default behaviour of {beatname_uc} is to differentiate
+between files using their inodes and device ids.
+
+[source,yaml]
+----
+file_identity.native: ~
+----
+
+*`path`*:: To identify files based on their paths use this strategy.
+
+WARNING: Only use this strategy if your log files are rotated to a folder
+outside of the scope of your input or not at all. Otherwise you end up
+with duplicated events.
+
+WARNING: This strategy does not support renaming files.
+If an input file is renamed, {beatname_uc} will read it again if the new path
+matches the settings of the input.
+
+[source,yaml]
+----
+file_identity.path: ~
+----
+
+*`inode_marker`*:: If the device id changes from time to time, you must use
+this method to distinguish files. This option is not supported on Windows.
+
+Set the location of the marker file the following way:
+
+[source,yaml]
+----
+file_identity.inode_marker.path: /logs/.filebeat-marker
+----
+
diff --git a/filebeat/docs/inputs/input-filestream-reader-options.asciidoc b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc
new file mode 100644
index 00000000000..8b365f1ede2
--- /dev/null
+++ b/filebeat/docs/inputs/input-filestream-reader-options.asciidoc
@@ -0,0 +1,143 @@
+//////////////////////////////////////////////////////////////////////////
+//// This content is shared by Filebeat inputs that use the input
+//// but do not process files (the options for managing files
+//// on disk are not relevant)
+//// If you add IDs to sections, make sure you use attributes to create
+//// unique IDs for each input that includes this file. Use the format:
+//// [id="{beatname_lc}-input-{type}-option-name"]
+//////////////////////////////////////////////////////////////////////////
+
+[float]
+===== `encoding`
+
+The file encoding to use for reading data that contains international
+characters. See the encoding names http://www.w3.org/TR/encoding/[recommended by
+the W3C for use in HTML5].
+
+Valid encodings:
+
+ * `plain`: plain ASCII encoding
+ * `utf-8` or `utf8`: UTF-8 encoding
+ * `gbk`: simplified Chinese characters
+ * `iso8859-6e`: ISO8859-6E, Latin/Arabic
+ * `iso8859-6i`: ISO8859-6I, Latin/Arabic
+ * `iso8859-8e`: ISO8859-8E, Latin/Hebrew
+ * `iso8859-8i`: ISO8859-8I, Latin/Hebrew
+ * `iso8859-1`: ISO8859-1, Latin-1
+ * `iso8859-2`: ISO8859-2, Latin-2
+ * `iso8859-3`: ISO8859-3, Latin-3
+ * `iso8859-4`: ISO8859-4, Latin-4
+ * `iso8859-5`: ISO8859-5, Latin/Cyrillic
+ * `iso8859-6`: ISO8859-6, Latin/Arabic
+ * `iso8859-7`: ISO8859-7, Latin/Greek
+ * `iso8859-8`: ISO8859-8, Latin/Hebrew
+ * `iso8859-9`: ISO8859-9, Latin-5
+ * `iso8859-10`: ISO8859-10, Latin-6
+ * `iso8859-13`: ISO8859-13, Latin-7
+ * `iso8859-14`: ISO8859-14, Latin-8
+ * `iso8859-15`: ISO8859-15, Latin-9
+ * `iso8859-16`: ISO8859-16, Latin-10
+ * `cp437`: IBM CodePage 437
+ * `cp850`: IBM CodePage 850
+ * `cp852`: IBM CodePage 852
+ * `cp855`: IBM CodePage 855
+ * `cp858`: IBM CodePage 858
+ * `cp860`: IBM CodePage 860
+ * `cp862`: IBM CodePage 862
+ * `cp863`: IBM CodePage 863
+ * `cp865`: IBM CodePage 865
+ * `cp866`: IBM CodePage 866
+ * `ebcdic-037`: IBM CodePage 037
+ * `ebcdic-1040`: IBM CodePage 1140
+ * `ebcdic-1047`: IBM CodePage 1047
+ * `koi8r`: KOI8-R, Russian (Cyrillic)
+ * `koi8u`: KOI8-U, Ukrainian (Cyrillic)
+ * `macintosh`: Macintosh encoding
+ * `macintosh-cyrillic`: Macintosh Cyrillic encoding
+ * `windows1250`: Windows1250, Central and Eastern European
+ * `windows1251`: Windows1251, Russian, Serbian (Cyrillic)
+ * `windows1252`: Windows1252, Legacy
+ * `windows1253`: Windows1253, Modern Greek
+ * `windows1254`: Windows1254, Turkish
+ * `windows1255`: Windows1255, Hebrew
+ * `windows1256`: Windows1256, Arabic
+ * `windows1257`: Windows1257, Estonian, Latvian, Lithuanian
+ * `windows1258`: Windows1258, Vietnamese
+ * `windows874`: Windows874, ISO/IEC 8859-11, Latin/Thai
+ * `utf-16-bom`: UTF-16 with required BOM
+ * `utf-16be-bom`: big endian UTF-16 with required BOM
+ * `utf-16le-bom`: little endian UTF-16 with required BOM
+
+The `plain` encoding is special, because it does not validate or transform any input.
+
+[float]
+[id="{beatname_lc}-input-{type}-exclude-lines"]
+===== `exclude_lines`
+
+A list of regular expressions to match the lines that you want {beatname_uc} to
+exclude. {beatname_uc} drops any lines that match a regular expression in the
+list. By default, no lines are dropped. Empty lines are ignored.
+
+The following example configures {beatname_uc} to drop any lines that start with
+`DBG`.
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  exclude_lines: ['^DBG']
+----
+
+See <> for a list of supported regexp patterns.
+
+[float]
+[id="{beatname_lc}-input-{type}-include-lines"]
+===== `include_lines`
+
+A list of regular expressions to match the lines that you want {beatname_uc} to
+include. {beatname_uc} exports only the lines that match a regular expression in
+the list. By default, all lines are exported. Empty lines are ignored.
+
+The following example configures {beatname_uc} to export any lines that start
+with `ERR` or `WARN`:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  include_lines: ['^ERR', '^WARN']
+----
+
+NOTE: If both `include_lines` and `exclude_lines` are defined, {beatname_uc}
+executes `include_lines` first and then executes `exclude_lines`. The order in
+which the two options are defined doesn't matter. The `include_lines` option
+will always be executed before the `exclude_lines` option, even if
+`exclude_lines` appears before `include_lines` in the config file.
+
+The following example exports all log lines that contain `sometext`,
+except for lines that begin with `DBG` (debug messages):
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: {type}
+  ...
+  include_lines: ['sometext']
+  exclude_lines: ['^DBG']
+----
+
+See <> for a list of supported regexp patterns.
+
+[float]
+===== `buffer_size`
+
+The size in bytes of the buffer that each harvester uses when fetching a file.
+The default is 16384.
+
+[float]
+===== `message_max_bytes`
+
+The maximum number of bytes that a single log message can have. All bytes after
+`message_max_bytes` are discarded and not sent. The default is 10MB (10485760).
diff --git a/filebeat/docs/inputs/input-filestream.asciidoc b/filebeat/docs/inputs/input-filestream.asciidoc
new file mode 100644
index 00000000000..0a02a865465
--- /dev/null
+++ b/filebeat/docs/inputs/input-filestream.asciidoc
@@ -0,0 +1,165 @@
+:type: filestream
+
+[id="{beatname_lc}-input-{type}"]
+=== filestream input
+
+experimental[]
+
+++++
+filestream
+++++
+
+Use the `filestream` input to read lines from active log files. It is the
+new, improved alternative to the `log` input. However, a few features are
+missing from it, e.g. `multiline` or other special parsing capabilities.
+These missing options are probably going to be added again. We strive to
+achieve feature parity, if possible.
+
+To configure this input, specify a list of glob-based <>
+that must be crawled to locate and fetch the log lines.
+
+Example configuration:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: filestream
+  paths:
+    - /var/log/messages
+    - /var/log/*.log
+----
+
+
+You can apply additional
+<<{beatname_lc}-input-{type}-options,configuration settings>> (such as `fields`,
+`include_lines`, `exclude_lines` and so on) to the lines harvested
+from these files. The options that you specify are applied to all the files
+harvested by this input.
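+
+For instance, the following sketch adds a custom field and drops debug lines for
+every file harvested by this input. The field name and the patterns used here are
+only examples:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: filestream
+  paths:
+    - /var/log/*.log
+  exclude_lines: ['^DBG']
+  fields:
+    app: my-app
+----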
+
+To apply different configuration settings to different files, you need to define
+multiple input sections:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: filestream <1>
+  paths:
+    - /var/log/system.log
+    - /var/log/wifi.log
+- type: filestream <2>
+  paths:
+    - "/var/log/apache2/*"
+  fields:
+    apache: true
+----
+
+<1> Harvests lines from two files: `system.log` and
+`wifi.log`.
+<2> Harvests lines from every file in the `apache2` directory, and uses the
+`fields` configuration option to add a field called `apache` to the output.
+
+
+[[filestream-file-identity]]
+==== Reading files on network shares and cloud providers
+
+WARNING: Filebeat does not support reading from network shares and cloud providers.
+
+However, one of the limitations of these data sources can be mitigated
+if you configure Filebeat adequately.
+
+By default, {beatname_uc} identifies files based on their inodes and
+device IDs. However, on network shares and cloud providers these
+values might change during the lifetime of the file. If this happens,
+{beatname_uc} thinks that the file is new and resends the whole content
+of the file. To solve this problem you can configure the `file_identity` option.
+Possible values besides the default `native` are `path` and `inode_marker`.
+
+Selecting `path` instructs {beatname_uc} to identify files based on their
+paths. This is a quick way to avoid rereading files if inode and device ids
+might change. However, keep in mind that if the files are rotated (renamed), they
+will be reread and resubmitted.
+
+The option `inode_marker` can be used if the inodes stay the same even if
+the device id is changed. If possible, you should choose this method instead of
+`path` if your files are rotated. You have to configure a marker file
+readable by {beatname_uc} and set the path in the option `path` of `inode_marker`.
+
+The content of this file must be unique to the device. You can put the
+UUID of the device or mountpoint where the input is stored. Please note that
+you should not use this option on Windows, as file identifiers might be
+more volatile. The following example one-liner generates a hidden marker file
+for the selected mountpoint `/logs`:
+
+["source","sh",subs="attributes"]
+----
+$ lsblk -o MOUNTPOINT,UUID | grep /logs | awk '{print $2}' >> /logs/.filebeat-marker
+----
+
+To set the generated file as a marker for `file_identity` you should configure
+the input the following way:
+
+["source","yaml",subs="attributes"]
+----
+{beatname_lc}.inputs:
+- type: filestream
+  paths:
+    - /logs/*.log
+  file_identity.inode_marker.path: /logs/.filebeat-marker
+----
+
+
+[[filestream-rotating-logs]]
+==== Reading from rotating logs
+
+When dealing with file rotation, avoid harvesting symlinks. Instead
+use the <> setting to point to the original file, and specify
+a pattern that matches the file you want to harvest and all of its rotated
+files. Also make sure your log rotation strategy prevents lost or duplicate
+messages. For more information, see <>.
+
+Furthermore, to avoid duplicates of rotated log messages, do not use the
+`path` method for `file_identity`. Alternatively, exclude the rotated files with
+the `exclude_files` option.
+
+[id="{beatname_lc}-input-{type}-options"]
+==== Prospector options
+
+The `filestream` input supports the following configuration options plus the
+<<{beatname_lc}-input-{type}-common-options>> described later.
+
+[float]
+[[filestream-input-paths]]
+===== `paths`
+
+A list of glob-based paths that will be crawled and fetched.
All patterns +supported by https://golang.org/pkg/path/filepath/#Glob[Go Glob] are also +supported here. For example, to fetch all files from a predefined level of +subdirectories, the following pattern can be used: `/var/log/*/*.log`. This +fetches all `.log` files from the subfolders of `/var/log`. It does not +fetch log files from the `/var/log` folder itself. +It is possible to recursively fetch all files in all subdirectories of a directory +using the optional <> settings. + +{beatname_uc} starts a harvester for each file that it finds under the specified +paths. You can specify one path per line. Each line begins with a dash (-). + +[float] +[[filestream-recursive-glob]] +===== `prospector.scanner.recursive_glob` + +Enable expanding `**` into recursive glob patterns. With this feature enabled, +the rightmost `**` in each path is expanded into a fixed number of glob +patterns. For example: `/foo/**` expands to `/foo`, `/foo/*`, `/foo/*/*`, and so +on. If enabled it expands a single `**` into a 8-level deep `*` pattern. + +This feature is enabled by default. Set `prospector.scanner.recursive_glob` to false to +disable it. + +include::../inputs/input-filestream-reader-options.asciidoc[] + +include::../inputs/input-filestream-file-options.asciidoc[] + +[id="{beatname_lc}-input-{type}-common-options"] +include::../inputs/input-common-options.asciidoc[] + +:type!: From d671e5275520c8b2d95ab4d67de9e6cfacb1a054 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Tue, 27 Oct 2020 12:28:48 +0100 Subject: [PATCH 71/93] [filebeat][okta] Make cursor optional for okta and update docs (#22091) * Make cursor optional for okta and update docs * Remove keep_state flag --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/modules/okta.asciidoc | 15 +++++++++------ x-pack/filebeat/module/okta/_meta/docs.asciidoc | 15 +++++++++------ 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ae48f268977..99008bf7181 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -640,6 +640,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Adding support for FIPS in s3 input {pull}21446[21446] - Add SSL option to checkpoint module {pull}19560[19560] - Add max_number_of_messages config into s3 input. {pull}21993[21993] +- Update Okta documentation for new stateful restarts. {pull}22091[22091] *Heartbeat* diff --git a/filebeat/docs/modules/okta.asciidoc b/filebeat/docs/modules/okta.asciidoc index 038f6d088dd..d1f8e6ea2ec 100644 --- a/filebeat/docs/modules/okta.asciidoc +++ b/filebeat/docs/modules/okta.asciidoc @@ -32,12 +32,6 @@ the logs while honoring any https://developer.okta.com/docs/reference/rate-limits/[rate-limiting] headers sent by Okta. -NOTE: This module does not persist the timestamp of the last read event in -order to facilitate resuming on restart. This feature will be coming in a future -version. When you restart the module will read events from the beginning of the -log. To minimize duplicates documents the module uses the event's Okta UUID -value as the Elasticsearch `_id`. - This is an example configuration for the module. [source,yaml] @@ -99,6 +93,15 @@ information. supported_protocols: [TLSv1.2] ---- +*`var.initial_interval`*:: + +An initial interval can be defined. The first time the module starts, will fetch events from the current moment minus the initial interval value. Following restarts will fetch events starting from the last event read. It defaults to `24h`. 
++ +[source,yaml] +---- + var.initial_interval: 24h # will fetch events starting 24h ago. +---- + [float] === Example dashboard diff --git a/x-pack/filebeat/module/okta/_meta/docs.asciidoc b/x-pack/filebeat/module/okta/_meta/docs.asciidoc index 1ea5cc6a66d..297a8644987 100644 --- a/x-pack/filebeat/module/okta/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/okta/_meta/docs.asciidoc @@ -27,12 +27,6 @@ the logs while honoring any https://developer.okta.com/docs/reference/rate-limits/[rate-limiting] headers sent by Okta. -NOTE: This module does not persist the timestamp of the last read event in -order to facilitate resuming on restart. This feature will be coming in a future -version. When you restart the module will read events from the beginning of the -log. To minimize duplicates documents the module uses the event's Okta UUID -value as the Elasticsearch `_id`. - This is an example configuration for the module. [source,yaml] @@ -94,6 +88,15 @@ information. supported_protocols: [TLSv1.2] ---- +*`var.initial_interval`*:: + +An initial interval can be defined. The first time the module starts, will fetch events from the current moment minus the initial interval value. Following restarts will fetch events starting from the last event read. It defaults to `24h`. ++ +[source,yaml] +---- + var.initial_interval: 24h # will fetch events starting 24h ago. +---- + [float] === Example dashboard From f0da6811f95298dedf3cbbfe9fca8591b1365c11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Tue, 27 Oct 2020 13:01:02 +0100 Subject: [PATCH 72/93] Add new licence status: expired (#22180) ## What does this PR do? This PR adds a new licence state named `Expired`. Previously, this prevented Beats from connecting to ES. ## Why is it important? Beats were not able to parse expired licences. This problem prevented users from using the features of the software which does not require a licence. ## Related issues Closes #21112 --- CHANGELOG.next.asciidoc | 1 + x-pack/libbeat/licenser/license.go | 5 +++++ x-pack/libbeat/licenser/license_test.go | 5 +++++ .../xpack-with-relax-expired-license-uuid.json | 13 +++++++++++++ x-pack/libbeat/licenser/types.go | 2 ++ 5 files changed, 26 insertions(+) create mode 100644 x-pack/libbeat/licenser/testdata/xpack-with-relax-expired-license-uuid.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 99008bf7181..1235422f1dc 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -188,6 +188,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21259[21258] - Orderly close processors when processing pipelines are not needed anymore to release their resources. {pull}16349[16349] - Fix memory leak and events duplication in docker autodiscover and add_docker_metadata. {pull}21851[21851] +- Fix parsing of expired licences. {issue}21112[21112] {pull}22180[22180] *Auditbeat* diff --git a/x-pack/libbeat/licenser/license.go b/x-pack/libbeat/licenser/license.go index e1c64fb314b..179c1c2f088 100644 --- a/x-pack/libbeat/licenser/license.go +++ b/x-pack/libbeat/licenser/license.go @@ -68,6 +68,11 @@ func (l *License) IsActive() bool { return l.Status == Active } +// IsExpired returns true if the licence has expired. +func (l *License) IsExpired() bool { + return l.Status == Expired +} + // IsTrial returns true if the remote cluster is in trial mode. 
func (l *License) IsTrial() bool { return l.Type == Trial diff --git a/x-pack/libbeat/licenser/license_test.go b/x-pack/libbeat/licenser/license_test.go index d8c8882c2fb..f21e6931e9b 100644 --- a/x-pack/libbeat/licenser/license_test.go +++ b/x-pack/libbeat/licenser/license_test.go @@ -132,6 +132,11 @@ func TestIsActive(t *testing.T) { l: License{Status: Inactive}, expected: false, }, + { + name: "expired", + l: License{Status: Expired}, + expected: false, + }, } for _, test := range tests { diff --git a/x-pack/libbeat/licenser/testdata/xpack-with-relax-expired-license-uuid.json b/x-pack/libbeat/licenser/testdata/xpack-with-relax-expired-license-uuid.json new file mode 100644 index 00000000000..9a933ca3de9 --- /dev/null +++ b/x-pack/libbeat/licenser/testdata/xpack-with-relax-expired-license-uuid.json @@ -0,0 +1,13 @@ +{ + "build": { + "hash": "053779d", + "date": "2018-07-20T05:25:16.206115Z" + }, + "license": { + "uid": "hello-license", + "type": "platinum", + "mode": "platinum", + "status": "expired", + "expiry_date_in_millis": 1588261199999 + } +} diff --git a/x-pack/libbeat/licenser/types.go b/x-pack/libbeat/licenser/types.go index 0e819275808..f0d3b64898d 100644 --- a/x-pack/libbeat/licenser/types.go +++ b/x-pack/libbeat/licenser/types.go @@ -25,11 +25,13 @@ type State int const ( Inactive State = iota Active + Expired ) var stateLookup = map[string]State{ "inactive": Inactive, "active": Active, + "expired": Expired, } var licenseLookup = map[string]LicenseType{ From 5f83f604665c0b5074f973dfb0c65cdcd876f308 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 27 Oct 2020 08:29:39 -0400 Subject: [PATCH 73/93] [Elastic Agent] Ensure shell wrapper path exists on install. (#22144) * Ensure wrapper path on install. * Add changelog. --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + x-pack/elastic-agent/pkg/agent/install/install.go | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index b6a870e0259..68407c0ecd1 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -23,6 +23,7 @@ - Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] - Fix issue with named pipes on Windows 7 {pull}21931[21931] - Fix missing elastic_agent event data {pull}21994[21994] +- Ensure shell wrapper path exists before writing wrapper on install {pull}22144[22144] ==== New features diff --git a/x-pack/elastic-agent/pkg/agent/install/install.go b/x-pack/elastic-agent/pkg/agent/install/install.go index 2705ea0bfd9..01b9bd6f616 100644 --- a/x-pack/elastic-agent/pkg/agent/install/install.go +++ b/x-pack/elastic-agent/pkg/agent/install/install.go @@ -53,7 +53,10 @@ func Install() error { // place shell wrapper, if present on platform if ShellWrapperPath != "" { - err = ioutil.WriteFile(ShellWrapperPath, []byte(ShellWrapper), 0755) + err = os.MkdirAll(filepath.Dir(ShellWrapperPath), 0755) + if err == nil { + err = ioutil.WriteFile(ShellWrapperPath, []byte(ShellWrapper), 0755) + } if err != nil { return errors.New( err, From f07cc32037a91a37fcc8ad57359fc063fbddeb04 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 27 Oct 2020 10:04:03 -0400 Subject: [PATCH 74/93] [Elastic Agent] Fix deb/rpm installation (#22153) * Fix issue on elastic-agent systemd unit. * Fix service file for fedora. * Add changelog. 
--- dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl | 4 +--- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl b/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl index f7ffb886884..5fa0b1436b0 100644 --- a/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl +++ b/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl @@ -9,10 +9,8 @@ After=network-online.target User={{ .BeatUser }} Group={{ .BeatUser }} {{- end }} -Environment="BEAT_LOG_OPTS=" Environment="BEAT_CONFIG_OPTS=-c /etc/{{.BeatName}}/{{.BeatName}}.yml" -Environment="BEAT_PATH_OPTS=--path.home /var/lib/{{.BeatName}} --path.config /etc/{{.BeatName}} --path.data /var/lib/{{.BeatName}}/data --path.logs /var/log/{{.BeatName}}" -ExecStart=/usr/share/{{.BeatName}}/bin/{{.BeatName}} --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS +ExecStart=/usr/bin/{{.BeatName}} run --environment systemd $BEAT_CONFIG_OPTS Restart=always [Install] diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 68407c0ecd1..9a032035068 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -24,6 +24,7 @@ - Fix issue with named pipes on Windows 7 {pull}21931[21931] - Fix missing elastic_agent event data {pull}21994[21994] - Ensure shell wrapper path exists before writing wrapper on install {pull}22144[22144] +- Fix deb/rpm packaging for Elastic Agent {pull}22153[22153] ==== New features From 3e73c99391799badcb5b5e8e63eba779928d2426 Mon Sep 17 00:00:00 2001 From: Andrew Stucki Date: Tue, 27 Oct 2020 10:49:57 -0400 Subject: [PATCH 75/93] [Packetbeat] Create x-pack magefile (#21979) * Create packetbeat x-pack magefile * add changelog entry * Fix up packetbeat magefile * Add makefile for x-pack directory * Uncomment packaging * Add desired platforms to Jenkinsfile * Add back in fields yml and kibana dashboards on package * Add generated configs in x-pack/packetbeat * Suggested changes --- .ci/packaging.groovy | 2 +- CHANGELOG.next.asciidoc | 1 + Makefile | 2 +- packetbeat/magefile.go | 323 +--- packetbeat/scripts/mage/config.go | 27 + packetbeat/scripts/mage/package.go | 72 + packetbeat/scripts/mage/pcap.go | 274 +++ x-pack/packetbeat/Jenkinsfile.yml | 30 +- x-pack/packetbeat/Makefile | 3 + x-pack/packetbeat/magefile.go | 102 + x-pack/packetbeat/packetbeat.docker.yml | 50 + x-pack/packetbeat/packetbeat.reference.yml | 2045 ++++++++++++++++++++ x-pack/packetbeat/packetbeat.yml | 277 +++ 13 files changed, 2888 insertions(+), 320 deletions(-) create mode 100644 packetbeat/scripts/mage/package.go create mode 100644 packetbeat/scripts/mage/pcap.go create mode 100644 x-pack/packetbeat/Makefile create mode 100644 x-pack/packetbeat/magefile.go create mode 100644 x-pack/packetbeat/packetbeat.docker.yml create mode 100644 x-pack/packetbeat/packetbeat.reference.yml create mode 100644 x-pack/packetbeat/packetbeat.yml diff --git a/.ci/packaging.groovy b/.ci/packaging.groovy index 073c977a22e..e86906fd8bb 100644 --- a/.ci/packaging.groovy +++ b/.ci/packaging.groovy @@ -91,7 +91,7 @@ pipeline { 'x-pack/heartbeat', // 'x-pack/journalbeat', 'x-pack/metricbeat', - // 'x-pack/packetbeat', + 'x-pack/packetbeat', 'x-pack/winlogbeat' ) } diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1235422f1dc..3022e1a5215 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ 
-782,6 +782,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add ECS fields for x509 certs, event categorization, and related IP info. {pull}19167[19167] - Add 100-continue support {issue}15830[15830] {pull}19349[19349] - Add initial SIP protocol support {pull}21221[21221] +- Change build process for x-pack distribution {pull}21979[21979] *Functionbeat* diff --git a/Makefile b/Makefile index d64bb07776b..84ac9bb6e1a 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ XPACK_SUFFIX=x-pack/ # PROJECTS_XPACK_PKG is a list of Beats that have independent packaging support # in the x-pack directory (rather than having the OSS build produce both sets # of artifacts). This will be removed once we complete the transition. -PROJECTS_XPACK_PKG=x-pack/auditbeat x-pack/dockerlogbeat x-pack/filebeat x-pack/heartbeat x-pack/metricbeat x-pack/winlogbeat +PROJECTS_XPACK_PKG=x-pack/auditbeat x-pack/dockerlogbeat x-pack/filebeat x-pack/heartbeat x-pack/metricbeat x-pack/winlogbeat x-pack/packetbeat # PROJECTS_XPACK_MAGE is a list of Beats whose primary build logic is based in # Mage. For compatibility with CI testing these projects support a subset of the # makefile targets. After all Beats converge to primarily using Mage we can diff --git a/packetbeat/magefile.go b/packetbeat/magefile.go index fd00c932ab4..e8973334621 100644 --- a/packetbeat/magefile.go +++ b/packetbeat/magefile.go @@ -21,13 +21,9 @@ package main import ( "fmt" - "log" - "strings" "time" "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage" @@ -44,7 +40,7 @@ import ( func init() { common.RegisterCheckDeps(Update) - unittest.RegisterPythonTestDeps(fieldsYML, Dashboards) + unittest.RegisterPythonTestDeps(packetbeat.FieldsYML, Dashboards) devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." } @@ -57,21 +53,7 @@ func Build() error { // GolangCrossBuild build the Beat binary inside of the golang-builder. // Do not use directly, use crossBuild instead. func GolangCrossBuild() error { - if dep, found := crossBuildDeps[devtools.Platform.Name]; found { - mg.Deps(dep) - } - - params := devtools.DefaultGolangCrossBuildArgs() - if flags, found := libpcapLDFLAGS[devtools.Platform.Name]; found { - params.Env = map[string]string{ - "CGO_LDFLAGS": flags, - } - } - if flags, found := libpcapCFLAGS[devtools.Platform.Name]; found { - params.Env["CGO_CFLAGS"] = flags - } - - return devtools.GolangCrossBuild(params) + return packetbeat.GolangCrossBuild() } // BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). @@ -118,12 +100,12 @@ func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() - devtools.UseElasticBeatPackaging() + devtools.UseElasticBeatOSSPackaging() devtools.PackageKibanaDashboardsFromBuildDir() - customizePackaging() + packetbeat.CustomizePackaging() mg.Deps(Update) - mg.Deps(CrossBuild, CrossBuildXPack, CrossBuildGoDaemon) + mg.Deps(CrossBuild, CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, TestPackages) } @@ -151,27 +133,7 @@ func includeList() error { // Fields generates fields.yml and fields.go files for the Beat. func Fields() { - mg.Deps(libbeatAndPacketbeatCommonFieldsGo, protosFieldsGo) - mg.Deps(fieldsYML) -} - -// libbeatAndPacketbeatCommonFieldsGo generates a fields.go containing both -// libbeat and packetbeat's common fields. 
-func libbeatAndPacketbeatCommonFieldsGo() error { - if err := devtools.GenerateFieldsYAML(); err != nil { - return err - } - return devtools.GenerateAllInOneFieldsGo() -} - -// protosFieldsGo generates a fields.go for each protocol. -func protosFieldsGo() error { - return devtools.GenerateModuleFieldsGo("protos") -} - -// fieldsYML generates the fields.yml file containing all fields. -func fieldsYML() error { - return devtools.GenerateFieldsYAML("protos") + packetbeat.Fields() } func fieldDocs() error { @@ -182,276 +144,3 @@ func fieldDocs() error { func Dashboards() error { return devtools.KibanaDashboards("protos") } - -// ----------------------------------------------------------------------------- -// Customizations specific to Packetbeat. -// - Config file contains an OS specific device name (affects darwin, windows). -// - Must compile libpcap or winpcap during cross-compilation. -// - On Linux libpcap is statically linked. Darwin and Windows are dynamic. - -const ( - libpcapURL = "https://s3.amazonaws.com/beats-files/deps/libpcap-1.8.1.tar.gz" - libpcapSHA256 = "673dbc69fdc3f5a86fb5759ab19899039a8e5e6c631749e48dcd9c6f0c83541e" -) - -const ( - linuxPcapLDFLAGS = "-L/libpcap/libpcap-1.8.1 -lpcap" - linuxPcapCFLAGS = "-I /libpcap/libpcap-1.8.1" -) - -var libpcapLDFLAGS = map[string]string{ - "linux/386": linuxPcapLDFLAGS, - "linux/amd64": linuxPcapLDFLAGS, - "linux/arm64": linuxPcapLDFLAGS, - "linux/armv5": linuxPcapLDFLAGS, - "linux/armv6": linuxPcapLDFLAGS, - "linux/armv7": linuxPcapLDFLAGS, - "linux/mips": linuxPcapLDFLAGS, - "linux/mipsle": linuxPcapLDFLAGS, - "linux/mips64": linuxPcapLDFLAGS, - "linux/mips64le": linuxPcapLDFLAGS, - "linux/ppc64le": linuxPcapLDFLAGS, - "linux/s390x": linuxPcapLDFLAGS, - "darwin/amd64": "-lpcap", - "windows/amd64": "-L /libpcap/win/WpdPack/Lib/x64 -lwpcap", - "windows/386": "-L /libpcap/win/WpdPack/Lib -lwpcap", -} - -var libpcapCFLAGS = map[string]string{ - "linux/386": linuxPcapCFLAGS, - "linux/amd64": linuxPcapCFLAGS, - "linux/arm64": linuxPcapCFLAGS, - "linux/armv5": linuxPcapCFLAGS, - "linux/armv6": linuxPcapCFLAGS, - "linux/armv7": linuxPcapCFLAGS, - "linux/mips": linuxPcapCFLAGS, - "linux/mipsle": linuxPcapCFLAGS, - "linux/mips64": linuxPcapCFLAGS, - "linux/mips64le": linuxPcapCFLAGS, - "linux/ppc64le": linuxPcapCFLAGS, - "linux/s390x": linuxPcapCFLAGS, - "windows/amd64": "-I /libpcap/win/WpdPack/Include", - "windows/386": "-I /libpcap/win/WpdPack/Include", -} - -var crossBuildDeps = map[string]func() error{ - "linux/386": buildLibpcapLinux386, - "linux/amd64": buildLibpcapLinuxAMD64, - "linux/arm64": buildLibpcapLinuxARM64, - "linux/armv5": buildLibpcapLinuxARMv5, - "linux/armv6": buildLibpcapLinuxARMv6, - "linux/armv7": buildLibpcapLinuxARMv7, - "linux/mips": buildLibpcapLinuxMIPS, - "linux/mipsle": buildLibpcapLinuxMIPSLE, - "linux/mips64": buildLibpcapLinuxMIPS64, - "linux/mips64le": buildLibpcapLinuxMIPS64LE, - "linux/ppc64le": buildLibpcapLinuxPPC64LE, - "linux/s390x": buildLibpcapLinuxS390x, - "windows/amd64": installLibpcapWindowsAMD64, - "windows/386": installLibpcapWindows386, -} - -// buildLibpcapFromSource builds libpcap from source because the library needs -// to be compiled with -fPIC. -// See https://github.com/elastic/beats/v7/pull/4217. 
-func buildLibpcapFromSource(params map[string]string) error { - tarFile, err := devtools.DownloadFile(libpcapURL, "/libpcap") - if err != nil { - return errors.Wrap(err, "failed to download libpcap source") - } - - if err = devtools.VerifySHA256(tarFile, libpcapSHA256); err != nil { - return err - } - - if err = devtools.Extract(tarFile, "/libpcap"); err != nil { - return errors.Wrap(err, "failed to extract libpcap") - } - - var configureArgs []string - for k, v := range params { - if strings.HasPrefix(k, "-") { - delete(params, k) - configureArgs = append(configureArgs, k+"="+v) - } - } - - // Use sh -c here because sh.Run does not expose a way to change the CWD. - // This command only runs in Linux so this is fine. - return sh.RunWith(params, "sh", "-c", - "cd /libpcap/libpcap-1.8.1 && "+ - "./configure --enable-usb=no --enable-bluetooth=no --enable-dbus=no "+strings.Join(configureArgs, " ")+"&& "+ - "make") -} - -func buildLibpcapLinux386() error { - return buildLibpcapFromSource(map[string]string{ - "CFLAGS": "-m32", - "LDFLAGS": "-m32", - }) -} - -func buildLibpcapLinuxAMD64() error { - return buildLibpcapFromSource(map[string]string{}) -} - -func buildLibpcapLinuxARM64() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "aarch64-unknown-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxARMv5() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "arm-linux-gnueabi", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxARMv6() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "arm-linux-gnueabi", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxARMv7() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "arm-linux-gnueabihf", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxMIPS() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "mips-unknown-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxMIPSLE() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "mipsle-unknown-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxMIPS64() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "mips64-unknown-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxMIPS64LE() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "mips64le-unknown-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxPPC64LE() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "powerpc64le-linux-gnu", - "--with-pcap": "linux", - }) -} - -func buildLibpcapLinuxS390x() error { - return buildLibpcapFromSource(map[string]string{ - "--host": "s390x-ibm-linux-gnu", - "--with-pcap": "linux", - }) -} - -func installLibpcapWindowsAMD64() error { - mg.SerialDeps(installWinpcap, generateWin64StaticWinpcap) - return nil -} - -func installLibpcapWindows386() error { - return installWinpcap() -} - -func installWinpcap() error { - log.Println("Install Winpcap") - const wpdpackURL = "https://www.winpcap.org/install/bin/WpdPack_4_1_2.zip" - - winpcapZip, err := devtools.DownloadFile(wpdpackURL, "/") - if err != nil { - return err - } - - if err = devtools.Extract(winpcapZip, "/libpcap/win"); err != nil { - return err - } - - return nil -} - -func generateWin64StaticWinpcap() error { - log.Println(">> Generating 64-bit winpcap static lib") - - // Notes: We are using absolute path to make sure the files - // are available for x-pack build. 
- // Ref: https://github.com/elastic/beats/v7/issues/1259 - defer devtools.DockerChown(devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib")) - return devtools.RunCmds( - // Requires mingw-w64-tools. - []string{"gendef", devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.dll")}, - []string{"mv", "wpcap.def", devtools.MustExpand("{{ elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.def")}, - []string{"x86_64-w64-mingw32-dlltool", "--as-flags=--64", - "-m", "i386:x86-64", "-k", - "--output-lib", "/libpcap/win/WpdPack/Lib/x64/libwpcap.a", - "--input-def", devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.def")}, - ) -} - -// customizePackaging modifies the device in the configuration files based on -// the target OS. -func customizePackaging() { - var ( - configYml = devtools.PackageFile{ - Mode: 0600, - Source: "{{.PackageDir}}/{{.BeatName}}.yml", - Config: true, - Dep: func(spec devtools.PackageSpec) error { - c := packetbeat.ConfigFileParams() - c.ExtraVars["GOOS"] = spec.OS - c.ExtraVars["GOARCH"] = spec.MustExpand("{{.GOARCH}}") - return devtools.Config(devtools.ShortConfigType, c, spec.MustExpand("{{.PackageDir}}")) - }, - } - referenceConfigYml = devtools.PackageFile{ - Mode: 0644, - Source: "{{.PackageDir}}/{{.BeatName}}.reference.yml", - Dep: func(spec devtools.PackageSpec) error { - c := packetbeat.ConfigFileParams() - c.ExtraVars["GOOS"] = spec.OS - c.ExtraVars["GOARCH"] = spec.MustExpand("{{.GOARCH}}") - return devtools.Config(devtools.ReferenceConfigType, c, spec.MustExpand("{{.PackageDir}}")) - }, - } - ) - - for _, args := range devtools.Packages { - for _, pkgType := range args.Types { - switch pkgType { - case devtools.TarGz, devtools.Zip: - args.Spec.ReplaceFile("{{.BeatName}}.yml", configYml) - args.Spec.ReplaceFile("{{.BeatName}}.reference.yml", referenceConfigYml) - case devtools.Deb, devtools.RPM, devtools.DMG: - args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.yml", configYml) - args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.reference.yml", referenceConfigYml) - case devtools.Docker: - args.Spec.ExtraVar("linux_capabilities", "cap_net_raw,cap_net_admin=eip") - default: - panic(errors.Errorf("unhandled package type: %v", pkgType)) - } - - // Match the first package type then continue. - break - } - } -} diff --git a/packetbeat/scripts/mage/config.go b/packetbeat/scripts/mage/config.go index a143cda22e7..c8b3d7242a4 100644 --- a/packetbeat/scripts/mage/config.go +++ b/packetbeat/scripts/mage/config.go @@ -18,6 +18,8 @@ package mage import ( + "github.com/magefile/mage/mg" + devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -50,3 +52,28 @@ func ConfigFileParams() devtools.ConfigFileParams { } return p } + +// Fields generates fields.yml and fields.go files for the Beat. +func Fields() { + mg.Deps(libbeatAndPacketbeatCommonFieldsGo, protosFieldsGo) + mg.Deps(FieldsYML) +} + +// libbeatAndPacketbeatCommonFieldsGo generates a fields.go containing both +// libbeat and packetbeat's common fields. +func libbeatAndPacketbeatCommonFieldsGo() error { + if err := devtools.GenerateFieldsYAML(); err != nil { + return err + } + return devtools.GenerateAllInOneFieldsGo() +} + +// protosFieldsGo generates a fields.go for each protocol. +func protosFieldsGo() error { + return devtools.GenerateModuleFieldsGo(devtools.OSSBeatDir("protos")) +} + +// FieldsYML generates the fields.yml file containing all fields. 
+func FieldsYML() error { + return devtools.GenerateFieldsYAML(devtools.OSSBeatDir("protos")) +} diff --git a/packetbeat/scripts/mage/package.go b/packetbeat/scripts/mage/package.go new file mode 100644 index 00000000000..e81176615ba --- /dev/null +++ b/packetbeat/scripts/mage/package.go @@ -0,0 +1,72 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "github.com/pkg/errors" + + devtools "github.com/elastic/beats/v7/dev-tools/mage" +) + +// CustomizePackaging modifies the device in the configuration files based on +// the target OS. +func CustomizePackaging() { + var ( + configYml = devtools.PackageFile{ + Mode: 0600, + Source: "{{.PackageDir}}/{{.BeatName}}.yml", + Config: true, + Dep: func(spec devtools.PackageSpec) error { + c := ConfigFileParams() + c.ExtraVars["GOOS"] = spec.OS + c.ExtraVars["GOARCH"] = spec.MustExpand("{{.GOARCH}}") + return devtools.Config(devtools.ShortConfigType, c, spec.MustExpand("{{.PackageDir}}")) + }, + } + referenceConfigYml = devtools.PackageFile{ + Mode: 0644, + Source: "{{.PackageDir}}/{{.BeatName}}.reference.yml", + Dep: func(spec devtools.PackageSpec) error { + c := ConfigFileParams() + c.ExtraVars["GOOS"] = spec.OS + c.ExtraVars["GOARCH"] = spec.MustExpand("{{.GOARCH}}") + return devtools.Config(devtools.ReferenceConfigType, c, spec.MustExpand("{{.PackageDir}}")) + }, + } + ) + + for _, args := range devtools.Packages { + for _, pkgType := range args.Types { + switch pkgType { + case devtools.TarGz, devtools.Zip: + args.Spec.ReplaceFile("{{.BeatName}}.yml", configYml) + args.Spec.ReplaceFile("{{.BeatName}}.reference.yml", referenceConfigYml) + case devtools.Deb, devtools.RPM, devtools.DMG: + args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.yml", configYml) + args.Spec.ReplaceFile("/etc/{{.BeatName}}/{{.BeatName}}.reference.yml", referenceConfigYml) + case devtools.Docker: + args.Spec.ExtraVar("linux_capabilities", "cap_net_raw,cap_net_admin=eip") + default: + panic(errors.Errorf("unhandled package type: %v", pkgType)) + } + + // Match the first package type then continue. + break + } + } +} diff --git a/packetbeat/scripts/mage/pcap.go b/packetbeat/scripts/mage/pcap.go new file mode 100644 index 00000000000..5f0ce0dc575 --- /dev/null +++ b/packetbeat/scripts/mage/pcap.go @@ -0,0 +1,274 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
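CustomizePackaging in the new package.go swaps the stock packetbeat.yml and packetbeat.reference.yml for OS-aware variants, keyed off the first package type declared for each package spec. The shape of that dispatch can be reduced to plain Go; the pkg and pkgType types below are made up for illustration and stand in for the devtools packaging structs.

    package main

    import "fmt"

    type pkgType int

    const (
        tarGz pkgType = iota
        deb
        docker
    )

    type pkg struct {
        types     []pkgType
        files     map[string]string // install path -> source file
        extraVars map[string]string
    }

    // customize mirrors the shape of CustomizePackaging: only the first
    // declared package type decides how the config files are replaced.
    func customize(p *pkg) {
        for _, t := range p.types {
            switch t {
            case tarGz:
                p.files["packetbeat.yml"] = "{{.PackageDir}}/packetbeat.yml"
            case deb:
                p.files["/etc/packetbeat/packetbeat.yml"] = "{{.PackageDir}}/packetbeat.yml"
            case docker:
                p.extraVars["linux_capabilities"] = "cap_net_raw,cap_net_admin=eip"
            default:
                panic(fmt.Errorf("unhandled package type: %v", t))
            }
            break // match the first package type, then move on to the next package
        }
    }

    func main() {
        p := &pkg{
            types:     []pkgType{deb, tarGz},
            files:     map[string]string{},
            extraVars: map[string]string{},
        }
        customize(p)
        fmt.Println(p.files, p.extraVars)
    }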
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "log" + "strings" + + devtools "github.com/elastic/beats/v7/dev-tools/mage" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" + "github.com/pkg/errors" +) + +// GolangCrossBuild build the Beat binary inside of the golang-builder. +// Do not use directly, use crossBuild instead. +func GolangCrossBuild() error { + if dep, found := crossBuildDeps[devtools.Platform.Name]; found { + mg.Deps(dep) + } + + params := devtools.DefaultGolangCrossBuildArgs() + if flags, found := libpcapLDFLAGS[devtools.Platform.Name]; found { + params.Env = map[string]string{ + "CGO_LDFLAGS": flags, + } + } + if flags, found := libpcapCFLAGS[devtools.Platform.Name]; found { + params.Env["CGO_CFLAGS"] = flags + } + + return devtools.GolangCrossBuild(params) +} + +// ----------------------------------------------------------------------------- +// Customizations specific to Packetbeat. +// - Config file contains an OS specific device name (affects darwin, windows). +// - Must compile libpcap or winpcap during cross-compilation. +// - On Linux libpcap is statically linked. Darwin and Windows are dynamic. + +const ( + libpcapURL = "https://s3.amazonaws.com/beats-files/deps/libpcap-1.8.1.tar.gz" + libpcapSHA256 = "673dbc69fdc3f5a86fb5759ab19899039a8e5e6c631749e48dcd9c6f0c83541e" +) + +const ( + linuxPcapLDFLAGS = "-L/libpcap/libpcap-1.8.1 -lpcap" + linuxPcapCFLAGS = "-I /libpcap/libpcap-1.8.1" +) + +var libpcapLDFLAGS = map[string]string{ + "linux/386": linuxPcapLDFLAGS, + "linux/amd64": linuxPcapLDFLAGS, + "linux/arm64": linuxPcapLDFLAGS, + "linux/armv5": linuxPcapLDFLAGS, + "linux/armv6": linuxPcapLDFLAGS, + "linux/armv7": linuxPcapLDFLAGS, + "linux/mips": linuxPcapLDFLAGS, + "linux/mipsle": linuxPcapLDFLAGS, + "linux/mips64": linuxPcapLDFLAGS, + "linux/mips64le": linuxPcapLDFLAGS, + "linux/ppc64le": linuxPcapLDFLAGS, + "linux/s390x": linuxPcapLDFLAGS, + "darwin/amd64": "-lpcap", + "windows/amd64": "-L /libpcap/win/WpdPack/Lib/x64 -lwpcap", + "windows/386": "-L /libpcap/win/WpdPack/Lib -lwpcap", +} + +var libpcapCFLAGS = map[string]string{ + "linux/386": linuxPcapCFLAGS, + "linux/amd64": linuxPcapCFLAGS, + "linux/arm64": linuxPcapCFLAGS, + "linux/armv5": linuxPcapCFLAGS, + "linux/armv6": linuxPcapCFLAGS, + "linux/armv7": linuxPcapCFLAGS, + "linux/mips": linuxPcapCFLAGS, + "linux/mipsle": linuxPcapCFLAGS, + "linux/mips64": linuxPcapCFLAGS, + "linux/mips64le": linuxPcapCFLAGS, + "linux/ppc64le": linuxPcapCFLAGS, + "linux/s390x": linuxPcapCFLAGS, + "windows/amd64": "-I /libpcap/win/WpdPack/Include", + "windows/386": "-I /libpcap/win/WpdPack/Include", +} + +var crossBuildDeps = map[string]func() error{ + "linux/386": buildLibpcapLinux386, + "linux/amd64": buildLibpcapLinuxAMD64, + "linux/arm64": buildLibpcapLinuxARM64, + "linux/armv5": buildLibpcapLinuxARMv5, + "linux/armv6": buildLibpcapLinuxARMv6, + "linux/armv7": buildLibpcapLinuxARMv7, + "linux/mips": buildLibpcapLinuxMIPS, + "linux/mipsle": buildLibpcapLinuxMIPSLE, + "linux/mips64": buildLibpcapLinuxMIPS64, + "linux/mips64le": buildLibpcapLinuxMIPS64LE, + "linux/ppc64le": buildLibpcapLinuxPPC64LE, + 
"linux/s390x": buildLibpcapLinuxS390x, + "windows/amd64": installLibpcapWindowsAMD64, + "windows/386": installLibpcapWindows386, +} + +// buildLibpcapFromSource builds libpcap from source because the library needs +// to be compiled with -fPIC. +// See https://github.com/elastic/beats/v7/pull/4217. +func buildLibpcapFromSource(params map[string]string) error { + tarFile, err := devtools.DownloadFile(libpcapURL, "/libpcap") + if err != nil { + return errors.Wrap(err, "failed to download libpcap source") + } + + if err = devtools.VerifySHA256(tarFile, libpcapSHA256); err != nil { + return err + } + + if err = devtools.Extract(tarFile, "/libpcap"); err != nil { + return errors.Wrap(err, "failed to extract libpcap") + } + + var configureArgs []string + for k, v := range params { + if strings.HasPrefix(k, "-") { + delete(params, k) + configureArgs = append(configureArgs, k+"="+v) + } + } + + // Use sh -c here because sh.Run does not expose a way to change the CWD. + // This command only runs in Linux so this is fine. + return sh.RunWith(params, "sh", "-c", + "cd /libpcap/libpcap-1.8.1 && "+ + "./configure --enable-usb=no --enable-bluetooth=no --enable-dbus=no "+strings.Join(configureArgs, " ")+"&& "+ + "make") +} + +func buildLibpcapLinux386() error { + return buildLibpcapFromSource(map[string]string{ + "CFLAGS": "-m32", + "LDFLAGS": "-m32", + }) +} + +func buildLibpcapLinuxAMD64() error { + return buildLibpcapFromSource(map[string]string{}) +} + +func buildLibpcapLinuxARM64() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "aarch64-unknown-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxARMv5() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "arm-linux-gnueabi", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxARMv6() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "arm-linux-gnueabi", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxARMv7() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "arm-linux-gnueabihf", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxMIPS() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "mips-unknown-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxMIPSLE() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "mipsle-unknown-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxMIPS64() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "mips64-unknown-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxMIPS64LE() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "mips64le-unknown-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxPPC64LE() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "powerpc64le-linux-gnu", + "--with-pcap": "linux", + }) +} + +func buildLibpcapLinuxS390x() error { + return buildLibpcapFromSource(map[string]string{ + "--host": "s390x-ibm-linux-gnu", + "--with-pcap": "linux", + }) +} + +func installLibpcapWindowsAMD64() error { + mg.SerialDeps(installWinpcap, generateWin64StaticWinpcap) + return nil +} + +func installLibpcapWindows386() error { + return installWinpcap() +} + +func installWinpcap() error { + log.Println("Install Winpcap") + const wpdpackURL = "https://www.winpcap.org/install/bin/WpdPack_4_1_2.zip" + + winpcapZip, err := devtools.DownloadFile(wpdpackURL, "/") + if err != nil { + return err + } + + if err = 
devtools.Extract(winpcapZip, "/libpcap/win"); err != nil { + return err + } + + return nil +} + +func generateWin64StaticWinpcap() error { + log.Println(">> Generating 64-bit winpcap static lib") + + // Notes: We are using absolute path to make sure the files + // are available for x-pack build. + // Ref: https://github.com/elastic/beats/v7/issues/1259 + defer devtools.DockerChown(devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib")) + return devtools.RunCmds( + // Requires mingw-w64-tools. + []string{"gendef", devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.dll")}, + []string{"mv", "wpcap.def", devtools.MustExpand("{{ elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.def")}, + []string{"x86_64-w64-mingw32-dlltool", "--as-flags=--64", + "-m", "i386:x86-64", "-k", + "--output-lib", "/libpcap/win/WpdPack/Lib/x64/libwpcap.a", + "--input-def", devtools.MustExpand("{{elastic_beats_dir}}/{{.BeatName}}/lib/windows-64/wpcap.def")}, + ) +} diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index a3c11636dc6..0ae3857acf6 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -13,6 +13,34 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + arm: + mage: "mage build unitTest" + platforms: ## override default label in this specific stage. + - "arm" + when: ## Override the top-level when. + comments: + - "/test x-pack/packetbeat for arm" + labels: + - "arm" + parameters: + - "armTest" + branches: true ## for all the branches + tags: true ## for all the tags + build: + mage: "mage build test" + macos: + mage: "mage build unitTest" + platforms: ## override default label in this specific stage. + - "macosx" + when: ## Override the top-level when. + comments: + - "/test x-pack/packetbeat for macos" + labels: + - "macOS" + parameters: + - "macosTest" + branches: true ## for all the branches + tags: true ## for all the tags windows: mage: "mage build unitTest" withModule: true @@ -35,7 +63,7 @@ stages: - "windows-2012-r2" when: ## Override the top-level when. comments: - - "/test x-pack/winlogbeat for windows-2012" + - "/test x-pack/packetbeat for windows-2012" labels: - "windows-2012" branches: true ## for all the branches diff --git a/x-pack/packetbeat/Makefile b/x-pack/packetbeat/Makefile new file mode 100644 index 00000000000..019d3b9309a --- /dev/null +++ b/x-pack/packetbeat/Makefile @@ -0,0 +1,3 @@ +ES_BEATS ?= ../.. + +include $(ES_BEATS)/dev-tools/make/mage.mk diff --git a/x-pack/packetbeat/magefile.go b/x-pack/packetbeat/magefile.go new file mode 100644 index 00000000000..0cdeba15dcd --- /dev/null +++ b/x-pack/packetbeat/magefile.go @@ -0,0 +1,102 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
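The libpcap tarball fetched in pcap.go is pinned to a SHA-256 digest (libpcapSHA256) and checked with devtools.VerifySHA256 before it is extracted. A minimal equivalent of that check using only the standard library is sketched below; verifySHA256 is a local helper written for this example, not the dev-tools function.

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "os"
    )

    // verifySHA256 hashes the file at path and compares it against the expected
    // hex digest, e.g. the pinned libpcap 1.8.1 checksum.
    func verifySHA256(path, expected string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()

        h := sha256.New()
        if _, err := io.Copy(h, f); err != nil {
            return err
        }
        if got := hex.EncodeToString(h.Sum(nil)); got != expected {
            return fmt.Errorf("checksum mismatch: got %s, want %s", got, expected)
        }
        return nil
    }

    func main() {
        const libpcapSHA256 = "673dbc69fdc3f5a86fb5759ab19899039a8e5e6c631749e48dcd9c6f0c83541e"
        if err := verifySHA256("/libpcap/libpcap-1.8.1.tar.gz", libpcapSHA256); err != nil {
            fmt.Println(err)
        }
    }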
+ +// +build mage + +package main + +import ( + "fmt" + "os" + "time" + + "github.com/magefile/mage/mg" + + devtools "github.com/elastic/beats/v7/dev-tools/mage" + packetbeat "github.com/elastic/beats/v7/packetbeat/scripts/mage" + + // mage:import + "github.com/elastic/beats/v7/dev-tools/mage/target/common" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/compose" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" + // mage:import + _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" +) + +func init() { + common.RegisterCheckDeps(Update) + + devtools.BeatDescription = "Packetbeat analyzes network traffic and sends the data to Elasticsearch." + devtools.BeatLicense = "Elastic License" +} + +// Update updates the generated files. +func Update() { + mg.SerialDeps(packetbeat.FieldsYML, Dashboards, Config) +} + +// Config generates the config files. +func Config() error { + return devtools.Config(devtools.AllConfigTypes, packetbeat.ConfigFileParams(), ".") +} + +// Dashboards packages kibana dashboards +func Dashboards() error { + return devtools.KibanaDashboards(devtools.OSSBeatDir("protos")) +} + +// Build builds the Beat binary. +func Build() error { + return devtools.Build(devtools.DefaultBuildArgs()) +} + +// GolangCrossBuild build the Beat binary inside of the golang-builder. +// Do not use directly, use crossBuild instead. +func GolangCrossBuild() error { + return packetbeat.GolangCrossBuild() +} + +// CrossBuild cross-builds the beat for all target platforms. +func CrossBuild() error { + return devtools.CrossBuild() +} + +// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). +func BuildGoDaemon() error { + return devtools.BuildGoDaemon() +} + +// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. +func CrossBuildGoDaemon() error { + return devtools.CrossBuildGoDaemon() +} + +// Package packages the Beat for distribution. +// Use SNAPSHOT=true to build snapshots. +// Use PLATFORMS to control the target platforms. +// Use VERSION_QUALIFIER to control the version qualifier. +func Package() { + start := time.Now() + defer func() { fmt.Println("package ran for", time.Since(start)) }() + + if v, found := os.LookupEnv("AGENT_PACKAGING"); found && v != "" { + devtools.UseElasticBeatXPackReducedPackaging() + } else { + devtools.UseElasticBeatXPackPackaging() + } + + devtools.PackageKibanaDashboardsFromBuildDir() + packetbeat.CustomizePackaging() + + mg.Deps(Update) + mg.Deps(CrossBuild, CrossBuildGoDaemon) + mg.SerialDeps(devtools.Package, TestPackages) +} + +// TestPackages tests the generated packages (i.e. file modes, owners, groups). 
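The x-pack magefile relies on mage's dependency helpers: mg.Deps runs each listed target at most once and may run them in parallel, while mg.SerialDeps enforces ordering, which is why Package() collects Update and the cross-builds with mg.Deps but finishes with mg.SerialDeps(devtools.Package, TestPackages). A toy magefile showing the difference, with made-up target names, could look like this:

    // +build mage

    package main

    import (
        "fmt"

        "github.com/magefile/mage/mg"
    )

    // Hypothetical targets, named only to illustrate mg.Deps vs. mg.SerialDeps.
    func GenFields() error     { fmt.Println("gen fields"); return nil }
    func GenDashboards() error { fmt.Println("gen dashboards"); return nil }
    func BuildBinary() error   { fmt.Println("build"); return nil }
    func TestBinary() error    { fmt.Println("test"); return nil }

    // Release shows the same layering used by Update() and Package() above:
    // mg.Deps runs its targets once each, possibly in parallel; mg.SerialDeps
    // runs its targets strictly in the given order, after the deps have finished.
    func Release() {
        mg.Deps(GenFields, GenDashboards)
        mg.SerialDeps(BuildBinary, TestBinary)
    }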
+func TestPackages() error { + return devtools.TestPackages() +} diff --git a/x-pack/packetbeat/packetbeat.docker.yml b/x-pack/packetbeat/packetbeat.docker.yml new file mode 100644 index 00000000000..4cf9016a926 --- /dev/null +++ b/x-pack/packetbeat/packetbeat.docker.yml @@ -0,0 +1,50 @@ +packetbeat.interfaces.device: any +packetbeat.interfaces.snaplen: 1514 +packetbeat.interfaces.type: af_packet +packetbeat.interfaces.buffer_size_mb: 100 + +packetbeat.flows: + timeout: 30s + period: 10s + +packetbeat.protocols.dns: + ports: [53] + +packetbeat.protocols.http: + ports: [80, 5601, 9200, 8080, 8081, 5000, 8002] + +packetbeat.protocols.memcache: + ports: [11211] + +packetbeat.protocols.mysql: + ports: [3306] + +packetbeat.protocols.pgsql: + ports: [5432] + +packetbeat.protocols.redis: + ports: [6379] + +packetbeat.protocols.thrift: + ports: [9090] + +packetbeat.protocols.mongodb: + ports: [27017] + +packetbeat.protocols.cassandra: + ports: [9042] + +packetbeat.protocols.tls: + ports: [443, 993, 995, 5223, 8443, 8883, 9243] + +packetbeat.protocols.sip: + ports: [5060] + +processors: + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + +output.elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' + username: '${ELASTICSEARCH_USERNAME:}' + password: '${ELASTICSEARCH_PASSWORD:}' diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml new file mode 100644 index 00000000000..6b936240bbb --- /dev/null +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -0,0 +1,2045 @@ +###################### Packetbeat Configuration Example ####################### + +# This file is a full configuration example documenting all non-deprecated +# options in comments. For a shorter configuration example, that contains only +# the most common options, please see packetbeat.yml in the same directory. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +# =============================== Network device =============================== + +# Select the network interface to sniff the data. You can use the "any" +# keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +# Packetbeat supports three sniffer types: +# * pcap, which uses the libpcap library and works on most platforms, but it's +# not the fastest option. +# * af_packet, which uses memory-mapped sniffing. This option is faster than +# libpcap and doesn't require a kernel module, but it's Linux-specific. +#packetbeat.interfaces.type: pcap + +# The maximum size of the packets to capture. The default is 65535, which is +# large enough for almost all networks and interface types. If you sniff on a +# physical network interface, the optimal setting is the MTU size. On virtual +# interfaces, however, it's safer to accept the default value. +#packetbeat.interfaces.snaplen: 65535 + +# The maximum size of the shared memory buffer to use between the kernel and +# user space. A bigger buffer usually results in lower CPU usage, but consumes +# more memory. This setting is only available for the af_packet sniffer type. +# The default is 30 MB. +#packetbeat.interfaces.buffer_size_mb: 30 + +# Packetbeat automatically generates a BPF for capturing only the traffic on +# ports where it expects to find known protocols. Use this settings to tell +# Packetbeat to generate a BPF filter that accepts VLAN tags. +#packetbeat.interfaces.with_vlans: true + +# Use this setting to override the automatically generated BPF filter. 
+#packetbeat.interfaces.bpf_filter: + +# With `auto_promisc_mode` Packetbeat puts interface in promiscuous mode automatically on startup. +# This option does not work with `any` interface device. +# The default option is false and requires manual set-up of promiscuous mode. +# Warning: under some circumstances (e.g beat crash) promiscuous mode +# can stay enabled even after beat is shut down. +#packetbeat.interfaces.auto_promisc_mode: true + +# =================================== Flows ==================================== + +packetbeat.flows: + # Enable Network flows. Default: true + #enabled: true + + # Set network flow timeout. Flow is killed if no packet is received before being + # timed out. + timeout: 30s + + # Configure reporting period. If set to -1, only killed flows will be reported + period: 10s + + # Set to true to publish fields with null values in events. + #keep_null: false + +# =========================== Transaction protocols ============================ + +packetbeat.protocols: +- type: icmp + # Enable ICMPv4 and ICMPv6 monitoring. Default: true + #enabled: true + + # Set to true to publish fields with null values in events. + #keep_null: false + +- type: amqp + # Enable AMQP monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for AMQP traffic. You can disable + # the AMQP protocol by commenting out the list of ports. + ports: [5672] + # Truncate messages that are published and avoid huge messages being + # indexed. + # Default: 1000 + #max_body_length: 1000 + + # Hide the header fields in header frames. + # Default: false + #parse_headers: false + + # Hide the additional arguments of method frames. + # Default: false + #parse_arguments: false + + # Hide all methods relative to connection negotiation between server and + # client. + # Default: true + #hide_connection_information: true + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: cassandra + #Cassandra port for traffic monitoring. + ports: [9042] + + # If this option is enabled, the raw message of the request (`cassandra_request` field) + # is included in published events. The default is true. + #send_request: true + + # If this option is enabled, the raw message of the response (`cassandra_request.request_headers` field) + # is included in published events. The default is true. enable `send_request` first before enable this option. + #send_request_header: true + + # If this option is enabled, the raw message of the response (`cassandra_response` field) + # is included in published events. The default is true. + #send_response: true + + # If this option is enabled, the raw message of the response (`cassandra_response.response_headers` field) + # is included in published events. The default is true. enable `send_response` first before enable this option. + #send_response_header: true + + # Set to true to publish fields with null values in events. 
+ #keep_null: false + + # Configures the default compression algorithm being used to uncompress compressed frames by name. Currently only `snappy` is can be configured. + # By default no compressor is configured. + #compressor: "snappy" + + # This option indicates which Operator/Operators will be ignored. + #ignored_ops: ["SUPPORTED","OPTIONS"] + +- type: dhcpv4 + # Configure the DHCP for IPv4 ports. + ports: [67, 68] + + # Set to true to publish fields with null values in events. + #keep_null: false + +- type: dns + # Enable DNS monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for DNS traffic. You can disable + # the DNS protocol by commenting out the list of ports. + ports: [53] + + # include_authorities controls whether or not the dns.authorities field + # (authority resource records) is added to messages. + # Default: false + include_authorities: true + # include_additionals controls whether or not the dns.additionals field + # (additional resource records) is added to messages. + # Default: false + include_additionals: true + + # send_request and send_response control whether or not the stringified DNS + # request and response message are added to the result. + # Nearly all data about the request/response is available in the dns.* + # fields, but this can be useful if you need visibility specifically + # into the request or the response. + # Default: false + # send_request: true + # send_response: true + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: http + # Enable HTTP monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for HTTP traffic. You can disable + # the HTTP protocol by commenting out the list of ports. + ports: [80, 8080, 8000, 5000, 8002] + + # Uncomment the following to hide certain parameters in URL or forms attached + # to HTTP requests. The names of the parameters are case insensitive. + # The value of the parameters will be replaced with the 'xxxxx' string. + # This is generally useful for avoiding storing user passwords or other + # sensitive information. + # Only query parameters and top level form parameters are replaced. + # hide_keywords: ['pass', 'password', 'passwd'] + + # A list of header names to capture and send to Elasticsearch. These headers + # are placed under the `headers` dictionary in the resulting JSON. + #send_headers: false + + # Instead of sending a white list of headers to Elasticsearch, you can send + # all headers by setting this option to true. The default is false. + #send_all_headers: false + + # A list of headers to redact if present in the HTTP request. This will keep + # the header field present, but will redact it's value to show the headers + # presence. + #redact_headers: [] + + # The list of content types for which Packetbeat includes the full HTTP + # payload. If the request's or response's Content-Type matches any on this + # list, the full body will be included under the request or response field. + #include_body_for: [] + + # The list of content types for which Packetbeat includes the full HTTP + # request payload. + #include_request_body_for: [] + + # The list of content types for which Packetbeat includes the full HTTP + # response payload. 
+ #include_response_body_for: [] + + # Whether the body of a request must be decoded when a content-encoding + # or transfer-encoding has been applied. + #decode_body: true + + # If the Cookie or Set-Cookie headers are sent, this option controls whether + # they are split into individual values. + #split_cookie: false + + # The header field to extract the real IP from. This setting is useful when + # you want to capture traffic behind a reverse proxy, but you want to get the + # geo-location information. + #real_ip_header: + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Maximum message size. If an HTTP message is larger than this, it will + # be trimmed to this size. Default is 10 MB. + #max_message_size: 10485760 + +- type: memcache + # Enable memcache monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for memcache traffic. You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + + # Uncomment the parseunknown option to force the memcache text protocol parser + # to accept unknown commands. + # Note: All unknown commands MUST not contain any data parts! + # Default: false + # parseunknown: true + + # Update the maxvalue option to store the values - base64 encoded - in the + # json output. + # possible values: + # maxvalue: -1 # store all values (text based protocol multi-get) + # maxvalue: 0 # store no values at all + # maxvalue: N # store up to N values + # Default: 0 + # maxvalues: -1 + + # Use maxbytespervalue to limit the number of bytes to be copied per value element. + # Note: Values will be base64 encoded, so actual size in json document + # will be 4 times maxbytespervalue. + # Default: unlimited + # maxbytespervalue: 100 + + # UDP transaction timeout in milliseconds. + # Note: Quiet messages in UDP binary protocol will get response only in error case. + # The memcached analyzer will wait for udptransactiontimeout milliseconds + # before publishing quiet messages. Non quiet messages or quiet requests with + # error response will not have to wait for the timeout. + # Default: 200 + # udptransactiontimeout: 1000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: mysql + # Enable mysql monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. 
+ ports: [3306,3307] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: pgsql + # Enable pgsql monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: redis + # Enable redis monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + + # Max size for per-session message queue. This places a limit on the memory + # that can be used to buffer requests and responses for correlation. + #queue_max_bytes: 1048576 + + # Max number of messages for per-session message queue. This limits the number + # of requests or responses that can be buffered for correlation. Set a value + # large enough to allow for pipelining. + #queue_max_messages: 20000 + +- type: thrift + # Enable thrift monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + + # The Thrift transport type. Currently this option accepts the values socket + # for TSocket, which is the default Thrift transport, and framed for the + # TFramed Thrift transport. The default is socket. + #transport_type: socket + + # The Thrift protocol type. Currently the only accepted value is binary for + # the TBinary protocol, which is the default Thrift protocol. + #protocol_type: binary + + # The Thrift interface description language (IDL) files for the service that + # Packetbeat is monitoring. Providing the IDL enables Packetbeat to include + # parameter and exception names. + #idl_files: [] + + # The maximum length for strings in parameters or return values. 
If a string + # is longer than this value, the string is automatically truncated to this + # length. + #string_max_size: 200 + + # The maximum number of elements in a Thrift list, set, map, or structure. + #collection_max_size: 15 + + # If this option is set to false, Packetbeat decodes the method name from the + # reply and simply skips the rest of the response message. + #capture_reply: true + + # If this option is set to true, Packetbeat replaces all strings found in + # method parameters, return codes, or exception structures with the "*" + # string. + #obfuscate_strings: false + + # The maximum number of fields that a structure can have before Packetbeat + # ignores the whole transaction. + #drop_after_n_struct_fields: 500 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: mongodb + # Enable mongodb monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + + + # The maximum number of documents from the response to index in the `response` + # field. The default is 10. + #max_docs: 10 + + # The maximum number of characters in a single document indexed in the + # `response` field. The default is 5000. You can set this to 0 to index an + # unlimited number of characters per document. + #max_doc_length: 5000 + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: nfs + # Enable NFS monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for NFS traffic. You can disable + # the NFS protocol by commenting out the list of ports. + ports: [2049] + + # If this option is enabled, the raw message of the request (`request` field) + # is sent to Elasticsearch. The default is false. + #send_request: false + + # If this option is enabled, the raw message of the response (`response` + # field) is sent to Elasticsearch. The default is false. + #send_response: false + + # Set to true to publish fields with null values in events. + #keep_null: false + + # Transaction timeout. Expired transactions will no longer be correlated to + # incoming responses, but sent to Elasticsearch immediately. + #transaction_timeout: 10s + +- type: tls + # Enable TLS monitoring. Default: true + #enabled: true + + # Configure the ports where to listen for TLS traffic. You can disable + # the TLS protocol by commenting out the list of ports. 
+ ports: + - 443 # HTTPS + - 993 # IMAPS + - 995 # POP3S + - 5223 # XMPP over SSL + - 8443 + - 8883 # Secure MQTT + - 9243 # Elasticsearch + + # List of hash algorithms to use to calculate certificates' fingerprints. + # Valid values are `sha1`, `sha256` and `md5`. + #fingerprints: [sha1] + + # If this option is enabled, the client and server certificates and + # certificate chains are sent to Elasticsearch. The default is true. + #send_certificates: true + + # If this option is enabled, the raw certificates will be stored + # in PEM format under the `raw` key. The default is false. + #include_raw_certificates: false + + # Set to true to publish fields with null values in events. + #keep_null: false + +- type: sip + # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports. + ports: [5060] + + # Parse the authorization headers + parse_authorization: true + + # Parse body contents (only when body is SDP) + parse_body: true + + # Preserve original contents in event.original + keep_original: true + +# ============================ Monitored processes ============================= + +# Packetbeat can enrich events with information about the process associated +# the socket that sent or received the packet if Packetbeat is monitoring +# traffic from the host machine. By default process enrichment is disabled. +# This feature works on Linux and Windows. +packetbeat.procs.enabled: false + +# If you want to ignore transactions created by the server on which the shipper +# is installed you can enable this option. This option is useful to remove +# duplicates if shippers are installed on multiple servers. Default value is +# false. +packetbeat.ignore_outgoing: false + +# ================================== General =================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +# If this options is not defined, the hostname is used. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. Tags make it easy to group servers by different +# logical properties. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. Fields can be scalar values, arrays, dictionaries, or any nested +# combination of these. +#fields: +# env: staging + +# If this option is set to true, the custom fields are stored as top-level +# fields in the output document instead of being grouped under a fields +# sub-dictionary. Default is false. +#fields_under_root: false + +# Internal queue configuration for buffering events to be published. +#queue: + # Queue type by name (default 'mem') + # The memory queue will present all available events (up to the outputs + # bulk_max_size) to the output, the moment the output is ready to server + # another batch of events. + #mem: + # Max number of events the queue can buffer. + #events: 4096 + + # Hints the minimum number of events stored in the queue, + # before providing a batch of events to the outputs. + # The default value is set to 2048. + # A value of 0 ensures events are immediately available + # to be sent to the outputs. + #flush.min_events: 2048 + + # Maximum duration after which events are available to the outputs, + # if the number of events stored in the queue is < `flush.min_events`. 
+ #flush.timeout: 1s + + # The spool queue will store events in a local spool file, before + # forwarding the events to the outputs. + # + # Beta: spooling to disk is currently a beta feature. Use with care. + # + # The spool file is a circular buffer, which blocks once the file/buffer is full. + # Events are put into a write buffer and flushed once the write buffer + # is full or the flush_timeout is triggered. + # Once ACKed by the output, events are removed immediately from the queue, + # making space for new events to be persisted. + #spool: + # The file namespace configures the file path and the file creation settings. + # Once the file exists, the `size`, `page_size` and `prealloc` settings + # will have no more effect. + #file: + # Location of spool file. The default value is ${path.data}/spool.dat. + #path: "${path.data}/spool.dat" + + # Configure file permissions if file is created. The default value is 0600. + #permissions: 0600 + + # File size hint. The spool blocks, once this limit is reached. The default value is 100 MiB. + #size: 100MiB + + # The files page size. A file is split into multiple pages of the same size. The default value is 4KiB. + #page_size: 4KiB + + # If prealloc is set, the required space for the file is reserved using + # truncate. The default value is true. + #prealloc: true + + # Spool writer settings + # Events are serialized into a write buffer. The write buffer is flushed if: + # - The buffer limit has been reached. + # - The configured limit of buffered events is reached. + # - The flush timeout is triggered. + #write: + # Sets the write buffer size. + #buffer_size: 1MiB + + # Maximum duration after which events are flushed if the write buffer + # is not full yet. The default value is 1s. + #flush.timeout: 1s + + # Number of maximum buffered events. The write buffer is flushed once the + # limit is reached. + #flush.events: 16384 + + # Configure the on-disk event encoding. The encoding can be changed + # between restarts. + # Valid encodings are: json, ubjson, and cbor. + #codec: cbor + #read: + # Reader flush timeout, waiting for more events to become available, so + # to fill a complete batch as required by the outputs. + # If flush_timeout is 0, all available events are forwarded to the + # outputs immediately. + # The default value is 0s. + #flush.timeout: 0s + +# Sets the maximum number of CPUs that can be executing simultaneously. The +# default is the number of logical CPUs available in the system. +#max_procs: + +# ================================= Processors ================================= + +# Processors are used to reduce the number of fields in the exported event or to +# enhance the event with external metadata. This section defines a list of +# processors that are applied one by one and the first one receives the initial +# event: +# +# event -> filter1 -> event1 -> filter2 ->event2 ... +# +# The supported processors are drop_fields, drop_event, include_fields, +# decode_json_fields, and add_cloud_metadata. 
+# +# For example, you can use the following processors to keep the fields that +# contain CPU load percentages, but remove the fields that contain CPU ticks +# values: +# +#processors: +# - include_fields: +# fields: ["cpu"] +# - drop_fields: +# fields: ["cpu.user", "cpu.system"] +# +# The following example drops the events that have the HTTP response code 200: +# +#processors: +# - drop_event: +# when: +# equals: +# http.code: 200 +# +# The following example renames the field a to b: +# +#processors: +# - rename: +# fields: +# - from: "a" +# to: "b" +# +# The following example tokenizes the string into fields: +# +#processors: +# - dissect: +# tokenizer: "%{key1} - %{key2}" +# field: "message" +# target_prefix: "dissect" +# +# The following example enriches each event with metadata from the cloud +# provider about the host machine. It works on EC2, GCE, DigitalOcean, +# Tencent Cloud, and Alibaba Cloud. +# +#processors: +# - add_cloud_metadata: ~ +# +# The following example enriches each event with the machine's local time zone +# offset from UTC. +# +#processors: +# - add_locale: +# format: offset +# +# The following example enriches each event with docker metadata, it matches +# given fields to an existing container id and adds info from that container: +# +#processors: +# - add_docker_metadata: +# host: "unix:///var/run/docker.sock" +# match_fields: ["system.process.cgroup.id"] +# match_pids: ["process.pid", "process.ppid"] +# match_source: true +# match_source_index: 4 +# match_short_id: false +# cleanup_timeout: 60 +# labels.dedot: false +# # To connect to Docker over TLS you must specify a client and CA certificate. +# #ssl: +# # certificate_authority: "/etc/pki/root/ca.pem" +# # certificate: "/etc/pki/client/cert.pem" +# # key: "/etc/pki/client/cert.key" +# +# The following example enriches each event with docker metadata, it matches +# container id from log path available in `source` field (by default it expects +# it to be /var/lib/docker/containers/*/*.log). +# +#processors: +# - add_docker_metadata: ~ +# +# The following example enriches each event with host metadata. +# +#processors: +# - add_host_metadata: ~ +# +# The following example enriches each event with process metadata using +# process IDs included in the event. +# +#processors: +# - add_process_metadata: +# match_pids: ["system.process.ppid"] +# target: system.process.parent +# +# The following example decodes fields containing JSON strings +# and replaces the strings with valid JSON objects. +# +#processors: +# - decode_json_fields: +# fields: ["field1", "field2", ...] 
+# process_array: false +# max_depth: 1 +# target: "" +# overwrite_keys: false +# +#processors: +# - decompress_gzip_field: +# from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true +# +# The following example copies the value of message to message_copied +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: message_copied +# fail_on_error: true +# ignore_missing: false +# +# The following example truncates the value of message to 1024 bytes +# +#processors: +# - truncate_fields: +# fields: +# - message +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example preserves the raw message under event.original +# +#processors: +# - copy_fields: +# fields: +# - from: message +# to: event.original +# fail_on_error: false +# ignore_missing: true +# - truncate_fields: +# fields: +# - event.original +# max_bytes: 1024 +# fail_on_error: false +# ignore_missing: true +# +# The following example URL-decodes the value of field1 to field2 +# +#processors: +# - urldecode: +# fields: +# - from: "field1" +# to: "field2" +# ignore_missing: false +# fail_on_error: true + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. + #parameters: + #param1: value1 + #param2: value2 + + # Number of workers per Elasticsearch host. + #worker: 1 + + # Optional index name. The default is "packetbeat" plus date + # and generates [packetbeat-]YYYY.MM.DD keys. + # In case you modify this pattern you must update setup.template.name and setup.template.pattern accordingly. + #index: "packetbeat-%{[agent.version]}-%{+yyyy.MM.dd}" + + # Optional ingest node pipeline. By default no pipeline will be used. + #pipeline: "" + + # Optional HTTP path + #path: "/elasticsearch" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server URL + #proxy_url: http://proxy:3128 + + # Whether to disable proxy settings for outgoing connections. 
If true, this + # takes precedence over both the proxy_url field and any environment settings + # (HTTP_PROXY, HTTPS_PROXY). The default is false. + #proxy_disable: false + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing a request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. 
+ #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Number of workers per Logstash host. + #worker: 1 + + # Set gzip compression level. + #compression_level: 3 + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Optional maximum time to live for a connection to Logstash, after which the + # connection will be re-established. A value of `0s` (the default) will + # disable this feature. + # + # Not yet supported for async connections (i.e. with the "pipelining" option set) + #ttl: 30s + + # Optionally load-balance events between Logstash hosts. Default is false. + #loadbalance: false + + # Number of batches to be sent asynchronously to Logstash while processing + # new batches. + #pipelining: 2 + + # If enabled only a subset of events in a batch of events is transferred per + # transaction. The number of events to be sent increases up to `bulk_max_size` + # if no error is encountered. + #slow_start: false + + # The number of seconds to wait before trying to reconnect to Logstash + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Logstash after a network error. The default is 60s. + #backoff.max: 60s + + # Optional index name. The default index name is set to packetbeat + # in all lowercase. + #index: 'packetbeat' + + # SOCKS5 proxy server URL + #proxy_url: socks5://user:password@socks5-server:2233 + + # Resolve names locally when using a proxy server. Defaults to false. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. 
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat and Winlogbeat, ignore the max_retries setting + # and retry until all events are published. Set max_retries to a value less + # than 0 to retry until all events are published. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Logstash request. The + # default is 2048. + #bulk_max_size: 2048 + + # The number of seconds to wait for responses from the Logstash server before + # timing out. The default is 30s. + #timeout: 30s + +# -------------------------------- Kafka Output -------------------------------- +#output.kafka: + # Boolean flag to enable or disable the output module. + #enabled: true + + # The list of Kafka broker addresses from which to fetch the cluster metadata. + # The cluster metadata contain the actual Kafka brokers events are published + # to. + #hosts: ["localhost:9092"] + + # The Kafka topic used for produced events. The setting can be a format string + # using any event field. To set the topic from document type use `%{[type]}`. + #topic: beats + + # The Kafka event key setting. Use format string to create a unique event key. + # By default no event key will be generated. + #key: '' + + # The Kafka event partitioning strategy. Default hashing strategy is `hash` + # using the `output.kafka.key` setting or randomly distributes events if + # `output.kafka.key` is not configured. + #partition.hash: + # If enabled, events will only be published to partitions with reachable + # leaders. Default is false. + #reachable_only: false + + # Configure alternative event field names used to compute the hash value. + # If empty `output.kafka.key` setting will be used. + # Default value is empty list. + #hash: [] + + # Authentication details. Password is required if username is set. + #username: '' + #password: '' + + # Kafka version Packetbeat is assumed to run against. Defaults to the "1.0.0". + #version: '1.0.0' + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Metadata update configuration. Metadata contains leader information + # used to decide which broker to use when publishing. + #metadata: + # Max metadata request retry attempts when cluster is in middle of leader + # election. Defaults to 3 retries. 
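+ # For example, a more patient metadata refresh during rolling broker restarts
+ # could raise both values (illustrative, not the defaults):
+ #  retry.max: 5
+ #  retry.backoff: 500ms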
+ #retry.max: 3 + + # Wait time between retries during leader elections. Default is 250ms. + #retry.backoff: 250ms + + # Refresh metadata interval. Defaults to every 10 minutes. + #refresh_frequency: 10m + + # Strategy for fetching the topics metadata from the broker. Default is false. + #full: false + + # The number of concurrent load-balanced Kafka output workers. + #worker: 1 + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to republish to Kafka + # after a network error. After waiting backoff.init seconds, the Beat + # tries to republish. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful publish, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to republish to + # Kafka after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Kafka request. The default + # is 2048. + #bulk_max_size: 2048 + + # Duration to wait before sending bulk Kafka request. 0 is no delay. The default + # is 0. + #bulk_flush_frequency: 0s + + # The number of seconds to wait for responses from the Kafka brokers before + # timing out. The default is 30s. + #timeout: 30s + + # The maximum duration a broker will wait for number of required ACKs. The + # default is 10s. + #broker_timeout: 10s + + # The number of messages buffered for each Kafka broker. The default is 256. + #channel_buffer_size: 256 + + # The keep-alive period for an active network connection. If 0s, keep-alives + # are disabled. The default is 0 seconds. + #keep_alive: 0 + + # Sets the output compression codec. Must be one of none, snappy and gzip. The + # default is gzip. + #compression: gzip + + # Set the compression level. Currently only gzip provides a compression level + # between 0 and 9. The default value is chosen by the compression algorithm. + #compression_level: 4 + + # The maximum permitted size of JSON-encoded messages. Bigger messages will be + # dropped. The default value is 1000000 (bytes). This value should be equal to + # or less than the broker's message.max.bytes. + #max_message_bytes: 1000000 + + # The ACK reliability level required from broker. 0=no response, 1=wait for + # local commit, -1=wait for all replicas to commit. The default is 1. Note: + # If set to 0, no ACKs are returned by Kafka. Messages might be lost silently + # on error. + #required_acks: 1 + + # The configurable ClientID used for logging, debugging, and auditing + # purposes. The default is "beats". + #client_id: beats + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. 
This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/security/keytabs/kafka.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # The service name. Service principal name is contructed from + # service_name/hostname@realm. + #kerberos.service_name: kafka + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + +# -------------------------------- Redis Output -------------------------------- +#output.redis: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty print json event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # The list of Redis servers to connect to. If load-balancing is enabled, the + # events are distributed to the servers in the list. If one server becomes + # unreachable, the events are distributed to the reachable servers only. + # The hosts setting supports redis and rediss urls with custom password like + # redis://:password@localhost:6379. + #hosts: ["localhost:6379"] + + # The name of the Redis list or channel the events are published to. The + # default is packetbeat. + #key: packetbeat + + # The password to authenticate to Redis with. The default is no authentication. + #password: + + # The Redis database number where the events are published. The default is 0. + #db: 0 + + # The Redis data type to use for publishing events. If the data type is list, + # the Redis RPUSH command is used. If the data type is channel, the Redis + # PUBLISH command is used. The default value is list. 
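+ # For example, to fan events out to subscribers over a Redis channel instead
+ # of a list (the key name is illustrative):
+ #  datatype: channel
+ #  key: packetbeat-events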
+ #datatype: list + + # The number of workers to use for each host configured to publish events to + # Redis. Use this setting along with the loadbalance option. For example, if + # you have 2 hosts and 3 workers, in total 6 workers are started (3 for each + # host). + #worker: 1 + + # If set to true and multiple hosts or workers are configured, the output + # plugin load balances published events onto all Redis hosts. If set to false, + # the output plugin sends all events to only one host (determined at random) + # and will switch to another host if the currently selected one becomes + # unreachable. The default value is true. + #loadbalance: true + + # The Redis connection timeout in seconds. The default is 5 seconds. + #timeout: 5s + + # The number of times to retry publishing an event after a publishing failure. + # After the specified number of retries, the events are typically dropped. + # Some Beats, such as Filebeat, ignore the max_retries setting and retry until + # all events are published. Set max_retries to a value less than 0 to retry + # until all events are published. The default is 3. + #max_retries: 3 + + # The number of seconds to wait before trying to reconnect to Redis + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Redis after a network error. The default is 60s. + #backoff.max: 60s + + # The maximum number of events to bulk in a single Redis request or pipeline. + # The default is 2048. + #bulk_max_size: 2048 + + # The URL of the SOCKS5 proxy to use when connecting to the Redis servers. The + # value must be a URL with a scheme of socks5://. + #proxy_url: + + # This option determines whether Redis hostnames are resolved locally when + # using a proxy. The default value is false, which means that name resolution + # occurs on the proxy server. + #proxy_use_local_resolver: false + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. 
+ #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# -------------------------------- File Output --------------------------------- +#output.file: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + + # Path to the directory where to save the generated files. The option is + # mandatory. + #path: "/tmp/packetbeat" + + # Name of the generated files. The default is `packetbeat` and it generates + # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + #filename: packetbeat + + # Maximum size in kilobytes of each file. When this size is reached, and on + # every Packetbeat restart, the files are rotated. The default value is 10240 + # kB. + #rotate_every_kb: 10000 + + # Maximum number of files under path. When this number of files is reached, + # the oldest file is deleted and the rest are shifted from last to first. The + # default is 7 files. + #number_of_files: 7 + + # Permissions to use for file creation. The default is 0600. + #permissions: 0600 + +# ------------------------------- Console Output ------------------------------- +#output.console: + # Boolean flag to enable or disable the output module. + #enabled: true + + # Configure JSON encoding + #codec.json: + # Pretty-print JSON event + #pretty: false + + # Configure escaping HTML symbols in strings. + #escape_html: false + +# =================================== Paths ==================================== + +# The home path for the Packetbeat installation. This is the default base path +# for all other path settings and for miscellaneous files that come with the +# distribution (for example, the sample dashboards). +# If not set by a CLI flag or in the configuration file, the default for the +# home path is the location of the binary. +#path.home: + +# The configuration path for the Packetbeat installation. This is the default +# base path for configuration files, including the main YAML configuration file +# and the Elasticsearch template file. If not set by a CLI flag or in the +# configuration file, the default for the configuration path is the home path. +#path.config: ${path.home} + +# The data path for the Packetbeat installation. This is the default base path +# for all the files in which Packetbeat needs to store its data. If not set by a +# CLI flag or in the configuration file, the default for the data path is a data +# subdirectory inside the home path. 
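+# For example, to keep state and logs outside the install directory, a layout
+# similar to the one used by the DEB/RPM packages could be configured
+# (paths are illustrative):
+#  path.home: /usr/share/packetbeat
+#  path.data: /var/lib/packetbeat
+#  path.logs: /var/log/packetbeat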
+#path.data: ${path.home}/data + +# The logs path for a Packetbeat installation. This is the default location for +# the Beat's log files. If not set by a CLI flag or in the configuration file, +# the default for the logs path is a logs subdirectory inside the home path. +#path.logs: ${path.home}/logs + +# ================================== Keystore ================================== + +# Location of the Keystore containing the keys and their sensitive values. +#keystore.path: "${path.config}/beats.keystore" + +# ================================= Dashboards ================================= + +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards are disabled by default and can be enabled either by setting the +# options here, or by using the `-setup` CLI flag or the `setup` command. +#setup.dashboards.enabled: false + +# The directory from where to read the dashboards. The default is the `kibana` +# folder in the home path. +#setup.dashboards.directory: ${path.home}/kibana + +# The URL from where to download the dashboards archive. It is used instead of +# the directory if it has a value. +#setup.dashboards.url: + +# The file archive (zip file) from where to read the dashboards. It is used instead +# of the directory when it has a value. +#setup.dashboards.file: + +# In case the archive contains the dashboards from multiple Beats, this lets you +# select which one to load. You can load all the dashboards in the archive by +# setting this to the empty string. +#setup.dashboards.beat: packetbeat + +# The name of the Kibana index to use for setting the configuration. Default is ".kibana" +#setup.dashboards.kibana_index: .kibana + +# The Elasticsearch index name. This overwrites the index name defined in the +# dashboards and index pattern. Example: testbeat-* +#setup.dashboards.index: + +# Always use the Kibana API for loading the dashboards instead of autodetecting +# how to install the dashboards by first querying Elasticsearch. +#setup.dashboards.always_kibana: false + +# If true and Kibana is not reachable at the time when dashboards are loaded, +# it will retry to reconnect to Kibana instead of exiting with an error. +#setup.dashboards.retry.enabled: false + +# Duration interval between Kibana connection retries. +#setup.dashboards.retry.interval: 1s + +# Maximum number of retries before exiting with an error, 0 for unlimited retrying. +#setup.dashboards.retry.maximum: 0 + +# ================================== Template ================================== + +# A template is used to set the mapping in Elasticsearch +# By default template loading is enabled and the template is loaded. +# These settings can be adjusted to load your own template or overwrite existing ones. + +# Set to false to disable template loading. +#setup.template.enabled: true + +# Select the kind of index template. From Elasticsearch 7.8, it is possible to +# use component templates. Available options: legacy, component, index. +# By default packetbeat uses the legacy index templates. +#setup.template.type: legacy + +# Template name. By default the template name is "packetbeat-%{[agent.version]}" +# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +#setup.template.name: "packetbeat-%{[agent.version]}" + +# Template pattern. By default the template pattern is "-%{[agent.version]}-*" to apply to the default index settings. +# The first part is the version of the beat and then -* is used to match all daily indices. 
+# The template name and pattern has to be set in case the Elasticsearch index pattern is modified. +#setup.template.pattern: "packetbeat-%{[agent.version]}-*" + +# Path to fields.yml file to generate the template +#setup.template.fields: "${path.config}/fields.yml" + +# A list of fields to be added to the template and Kibana index pattern. Also +# specify setup.template.overwrite: true to overwrite the existing template. +#setup.template.append_fields: +#- name: field_name +# type: field_type + +# Enable JSON template loading. If this is enabled, the fields.yml is ignored. +#setup.template.json.enabled: false + +# Path to the JSON template file +#setup.template.json.path: "${path.config}/template.json" + +# Name under which the template is stored in Elasticsearch +#setup.template.json.name: "" + +# Overwrite existing template +#setup.template.overwrite: false + +# Elasticsearch template settings +setup.template.settings: + + # A dictionary of settings to place into the settings.index dictionary + # of the Elasticsearch template. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping.html + #index: + #number_of_shards: 1 + #codec: best_compression + + # A dictionary of settings for the _source field. For more details, please check + # https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html + #_source: + #enabled: false + +# ====================== Index Lifecycle Management (ILM) ====================== + +# Configure index lifecycle management (ILM). These settings create a write +# alias and add additional settings to the index template. When ILM is enabled, +# output.elasticsearch.index is ignored, and the write alias is used to set the +# index name. + +# Enable ILM support. Valid values are true, false, and auto. When set to auto +# (the default), the Beat uses index lifecycle management when it connects to a +# cluster that supports ILM; otherwise, it creates daily indices. +#setup.ilm.enabled: auto + +# Set the prefix used in the index lifecycle write alias name. The default alias +# name is 'packetbeat-%{[agent.version]}'. +#setup.ilm.rollover_alias: 'packetbeat' + +# Set the rollover index pattern. The default is "%{now/d}-000001". +#setup.ilm.pattern: "{now/d}-000001" + +# Set the lifecycle policy name. The default policy name is +# 'beatname'. +#setup.ilm.policy_name: "mypolicy" + +# The path to a JSON file that contains a lifecycle policy configuration. Used +# to load your own lifecycle policy. +#setup.ilm.policy_file: + +# Disable the check for an existing lifecycle policy. The default is true. If +# you disable this check, set setup.ilm.overwrite: true so the lifecycle policy +# can be installed. +#setup.ilm.check_exists: true + +# Overwrite the lifecycle policy at startup. The default is false. +#setup.ilm.overwrite: false + +# =================================== Kibana =================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Optional protocol and basic auth credentials. 
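+ # For example, the credentials can be resolved from the keystore or the
+ # environment instead of being written here in plain text (key names are
+ # illustrative):
+ #  username: "${KIBANA_SETUP_USER}"
+ #  password: "${KIBANA_SETUP_PWD}"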
+ #protocol: "https" + #username: "elastic" + #password: "changeme" + + # Optional HTTP path + #path: "" + + # Optional Kibana space ID. + #space.id: "" + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + +# ================================== Logging =================================== + +# There are four options for the log output: file, stderr, syslog, eventlog +# The file output is the default. + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: info + +# Enable debug output for selected components. To enable all selectors use ["*"] +# Other available selectors are "beat", "publish", "service" +# Multiple selectors can be chained. +#logging.selectors: [ ] + +# Send all logging output to stderr. The default is false. +#logging.to_stderr: false + +# Send all logging output to syslog. The default is false. +#logging.to_syslog: false + +# Send all logging output to Windows Event Logs. The default is false. +#logging.to_eventlog: false + +# If enabled, Packetbeat periodically logs its internal metrics that have changed +# in the last period. For each metric that changed, the delta from the value at +# the beginning of the period is logged. Also, the total values for +# all non-zero internal metrics are logged on shutdown. The default is true. +#logging.metrics.enabled: true + +# The period after which to log the internal metrics. 
The default is 30s. +#logging.metrics.period: 30s + +# Logging to rotating files. Set logging.to_files to false to disable logging to +# files. +logging.to_files: true +logging.files: + # Configure the path where the logs are written. The default is the logs directory + # under the home path (the binary location). + #path: /var/log/packetbeat + + # The name of the files where the logs are written to. + #name: packetbeat + + # Configure log file size limit. If limit is reached, log file will be + # automatically rotated + #rotateeverybytes: 10485760 # = 10MB + + # Number of rotated log files to keep. Oldest files will be deleted first. + #keepfiles: 7 + + # The permissions mask to apply when rotating log files. The default value is 0600. + # Must be a valid Unix-style file permissions mask expressed in octal notation. + #permissions: 0600 + + # Enable log file rotation on time intervals in addition to size-based rotation. + # Intervals must be at least 1s. Values of 1m, 1h, 24h, 7*24h, 30*24h, and 365*24h + # are boundary-aligned with minutes, hours, days, weeks, months, and years as + # reported by the local system clock. All other intervals are calculated from the + # Unix epoch. Defaults to disabled. + #interval: 0 + + # Rotate existing logs on startup rather than appending to the existing + # file. Defaults to true. + # rotateonstartup: true + +# Set to true to log messages in JSON format. +#logging.json: false + +# Set to true, to log messages with minimal required Elastic Common Schema (ECS) +# information. Recommended to use in combination with `logging.json=true` +# Defaults to false. +#logging.ecs: false + +# ============================= X-Pack Monitoring ============================== +# Packetbeat can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#monitoring.enabled: false + +# Sets the UUID of the Elasticsearch cluster under which monitoring data for this +# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch +# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. +#monitoring.cluster_uuid: + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. +# Note that the settings should point to your Elasticsearch *monitoring* cluster. +# Any setting that is not set is automatically inherited from the Elasticsearch +# output configuration, so if you have the Elasticsearch output configured such +# that it is pointing to your Elasticsearch monitoring cluster, you can simply +# uncomment the following line. +#monitoring.elasticsearch: + + # Array of hosts to connect to. + # Scheme and port can be left out and will be set to the default (http and 9200) + # In case you specify and additional path, the scheme is required: http://localhost:9200/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200 + #hosts: ["localhost:9200"] + + # Set gzip compression level. + #compression_level: 0 + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "beats_system" + #password: "changeme" + + # Dictionary of HTTP parameters to pass within the URL with index operations. 
+ #parameters: + #param1: value1 + #param2: value2 + + # Custom HTTP headers to add to each request + #headers: + # X-My-Header: Contents of the header + + # Proxy server url + #proxy_url: http://proxy:3128 + + # The number of times a particular Elasticsearch index operation is attempted. If + # the indexing operation doesn't succeed after this many retries, the events are + # dropped. The default is 3. + #max_retries: 3 + + # The maximum number of events to bulk in a single Elasticsearch bulk API index request. + # The default is 50. + #bulk_max_size: 50 + + # The number of seconds to wait before trying to reconnect to Elasticsearch + # after a network error. After waiting backoff.init seconds, the Beat + # tries to reconnect. If the attempt fails, the backoff timer is increased + # exponentially up to backoff.max. After a successful connection, the backoff + # timer is reset. The default is 1s. + #backoff.init: 1s + + # The maximum number of seconds to wait before attempting to connect to + # Elasticsearch after a network error. The default is 60s. + #backoff.max: 60s + + # Configure HTTP request timeout before failing an request to Elasticsearch. + #timeout: 90 + + # Use SSL settings for HTTPS. + #ssl.enabled: true + + # Controls the verification of certificates. Valid values are: + # * full, which verifies that the provided certificate is signed by a trusted + # authority (CA) and also verifies that the server's hostname (or IP address) + # matches the names identified within the certificate. + # * certificate, which verifies that the provided certificate is signed by a + # trusted authority (CA), but does not perform any hostname verification. + # * none, which performs no verification of the server's certificate. This + # mode disables many of the security benefits of SSL/TLS and should only be used + # after very careful consideration. It is primarily intended as a temporary + # diagnostic mechanism when attempting to resolve TLS errors; its use in + # production environments is strongly discouraged. + # The default value is full. + #ssl.verification_mode: full + + # List of supported/valid TLS versions. By default all TLS versions from 1.1 + # up to 1.3 are enabled. + #ssl.supported_protocols: [TLSv1.1, TLSv1.2, TLSv1.3] + + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client certificate key + #ssl.key: "/etc/pki/client/cert.key" + + # Optional passphrase for decrypting the certificate key. + #ssl.key_passphrase: '' + + # Configure cipher suites to be used for SSL connections + #ssl.cipher_suites: [] + + # Configure curve types for ECDHE-based cipher suites + #ssl.curve_types: [] + + # Configure what types of renegotiation are supported. Valid options are + # never, once, and freely. Default is never. + #ssl.renegotiation: never + + # Configure a pin that can be used to do extra validation of the verified certificate chain, + # this allow you to ensure that a specific certificate is used to validate the chain of trust. + # + # The pin is a base64 encoded string of the SHA-256 fingerprint. + #ssl.ca_sha256: "" + + # Enable Kerberos support. Kerberos is automatically enabled if any Kerberos setting is set. + #kerberos.enabled: true + + # Authentication type to use with Kerberos. Available options: keytab, password. + #kerberos.auth_type: password + + # Path to the keytab file. 
It is used when auth_type is set to keytab. + #kerberos.keytab: /etc/elastic.keytab + + # Path to the Kerberos configuration. + #kerberos.config_path: /etc/krb5.conf + + # Name of the Kerberos user. + #kerberos.username: elastic + + # Password of the Kerberos user. It is used when auth_type is set to password. + #kerberos.password: changeme + + # Kerberos realm. + #kerberos.realm: ELASTIC + + #metrics.period: 10s + #state.period: 1m + +# The `monitoring.cloud.id` setting overwrites the `monitoring.elasticsearch.hosts` +# setting. You can find the value for this setting in the Elastic Cloud web UI. +#monitoring.cloud.id: + +# The `monitoring.cloud.auth` setting overwrites the `monitoring.elasticsearch.username` +# and `monitoring.elasticsearch.password` settings. The format is `:`. +#monitoring.cloud.auth: + +# =============================== HTTP Endpoint ================================ + +# Each beat can expose internal metrics through a HTTP endpoint. For security +# reasons the endpoint is disabled by default. This feature is currently experimental. +# Stats can be access through http://localhost:5066/stats . For pretty JSON output +# append ?pretty to the URL. + +# Defines if the HTTP endpoint is enabled. +#http.enabled: false + +# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# When using IP addresses, it is recommended to only use localhost. +#http.host: localhost + +# Port on which the HTTP endpoint will bind. Default is 5066. +#http.port: 5066 + +# Define which user should be owning the named pipe. +#http.named_pipe.user: + +# Define which the permissions that should be applied to the named pipe, use the Security +# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + +# ============================== Process Security ============================== + +# Enable or disable seccomp system call filtering on Linux. Default is enabled. +#seccomp.enabled: true + +# ============================== Instrumentation =============================== + +# Instrumentation support for the packetbeat. +#instrumentation: + # Set to true to enable instrumentation of packetbeat. + #enabled: false + + # Environment in which packetbeat is running on (eg: staging, production, etc.) + #environment: "" + + # APM Server hosts to report instrumentation results to. + #hosts: + # - http://localhost:8200 + + # API Key for the APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the APM Server(s). + #secret_token: + + # Enable profiling of the server, recording profile samples as events. + # + # This feature is experimental. + #profiling: + #cpu: + # Set to true to enable CPU profiling. + #enabled: false + #interval: 60s + #duration: 10s + #heap: + # Set to true to enable heap profiling. + #enabled: false + #interval: 60s + +# ================================= Migration ================================== + +# This allows to enable 6.7 migration aliases +#migration.6_to_7.enabled: false + diff --git a/x-pack/packetbeat/packetbeat.yml b/x-pack/packetbeat/packetbeat.yml new file mode 100644 index 00000000000..31c229b1ef7 --- /dev/null +++ b/x-pack/packetbeat/packetbeat.yml @@ -0,0 +1,277 @@ +#################### Packetbeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. 
The packetbeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/packetbeat/index.html + +# =============================== Network device =============================== + +# Select the network interface to sniff the data. On Linux, you can use the +# "any" keyword to sniff on all connected interfaces. +packetbeat.interfaces.device: any + +# =================================== Flows ==================================== + +# Set `enabled: false` or comment out all options to disable flows reporting. +packetbeat.flows: + # Set network flow timeout. Flow is killed if no packet is received before being + # timed out. + timeout: 30s + + # Configure reporting period. If set to -1, only killed flows will be reported + period: 10s + +# =========================== Transaction protocols ============================ + +packetbeat.protocols: +- type: icmp + # Enable ICMPv4 and ICMPv6 monitoring. Default: false + enabled: true + +- type: amqp + # Configure the ports where to listen for AMQP traffic. You can disable + # the AMQP protocol by commenting out the list of ports. + ports: [5672] + +- type: cassandra + #Cassandra port for traffic monitoring. + ports: [9042] + +- type: dhcpv4 + # Configure the DHCP for IPv4 ports. + ports: [67, 68] + +- type: dns + # Configure the ports where to listen for DNS traffic. You can disable + # the DNS protocol by commenting out the list of ports. + ports: [53] + +- type: http + # Configure the ports where to listen for HTTP traffic. You can disable + # the HTTP protocol by commenting out the list of ports. + ports: [80, 8080, 8000, 5000, 8002] + +- type: memcache + # Configure the ports where to listen for memcache traffic. You can disable + # the Memcache protocol by commenting out the list of ports. + ports: [11211] + +- type: mysql + # Configure the ports where to listen for MySQL traffic. You can disable + # the MySQL protocol by commenting out the list of ports. + ports: [3306,3307] + +- type: pgsql + # Configure the ports where to listen for Pgsql traffic. You can disable + # the Pgsql protocol by commenting out the list of ports. + ports: [5432] + +- type: redis + # Configure the ports where to listen for Redis traffic. You can disable + # the Redis protocol by commenting out the list of ports. + ports: [6379] + +- type: thrift + # Configure the ports where to listen for Thrift-RPC traffic. You can disable + # the Thrift-RPC protocol by commenting out the list of ports. + ports: [9090] + +- type: mongodb + # Configure the ports where to listen for MongoDB traffic. You can disable + # the MongoDB protocol by commenting out the list of ports. + ports: [27017] + +- type: nfs + # Configure the ports where to listen for NFS traffic. You can disable + # the NFS protocol by commenting out the list of ports. + ports: [2049] + +- type: tls + # Configure the ports where to listen for TLS traffic. You can disable + # the TLS protocol by commenting out the list of ports. + ports: + - 443 # HTTPS + - 993 # IMAPS + - 995 # POP3S + - 5223 # XMPP over SSL + - 8443 + - 8883 # Secure MQTT + - 9243 # Elasticsearch + +- type: sip + # Configure the ports where to listen for SIP traffic. You can disable the SIP protocol by commenting out the list of ports. 
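+ # For example, a PBX listening on an additional non-default port could be
+ # covered by extending the list (illustrative): ports: [5060, 5065]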
+ ports: [5060] + +# ======================= Elasticsearch template setting ======================= + +setup.template.settings: + index.number_of_shards: 1 + #index.codec: best_compression + #_source.enabled: false + +# ================================== General =================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +#name: + +# A list of tags to include in every event. In the default configuration file +# the forwarded tag causes Packetbeat to not add any host fields. If you are +# monitoring a network tap or mirror port then add the forwarded tag. +#tags: [forwarded] + +# Optional fields that you can specify to add additional information to the +# output. +#fields: +# env: staging + +# ================================= Dashboards ================================= +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here or by using the `setup` command. +#setup.dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#setup.dashboards.url: + +# =================================== Kibana =================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify and additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Kibana Space ID + # ID of the Kibana Space into which the dashboards should be loaded. By default, + # the Default Space will be used. + #space.id: + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Packetbeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `:`. +#cloud.auth: + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Array of hosts to connect to. + hosts: ["localhost:9200"] + + # Protocol - either `http` (default) or `https`. + #protocol: "https" + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + #username: "elastic" + #password: "changeme" + +# ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Optional SSL. By default is off. 
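+ # For example, a mutual-TLS setup would enable SSL and point the settings
+ # below at your own CA certificate and client key pair (paths are examples):
+ #ssl.enabled: true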
+ # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + +# ================================= Processors ================================= + +processors: + - # Add forwarded to tags when processing data from a network tap or mirror. + if.contains.tags: forwarded + then: + - drop_fields: + fields: [host] + else: + - add_host_metadata: ~ + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + +# ================================== Logging =================================== + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: debug + +# At debug level, you can selectively enable logging only for some components. +# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# "publish", "service". +#logging.selectors: ["*"] + +# ============================= X-Pack Monitoring ============================== +# Packetbeat can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#monitoring.enabled: false + +# Sets the UUID of the Elasticsearch cluster under which monitoring data for this +# Packetbeat instance will appear in the Stack Monitoring UI. If output.elasticsearch +# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. +#monitoring.cluster_uuid: + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. +# Note that the settings should point to your Elasticsearch *monitoring* cluster. +# Any setting that is not set is automatically inherited from the Elasticsearch +# output configuration, so if you have the Elasticsearch output configured such +# that it is pointing to your Elasticsearch monitoring cluster, you can simply +# uncomment the following line. +#monitoring.elasticsearch: + +# ============================== Instrumentation =============================== + +# Instrumentation support for the packetbeat. +#instrumentation: + # Set to true to enable instrumentation of packetbeat. + #enabled: false + + # Environment in which packetbeat is running on (eg: staging, production, etc.) + #environment: "" + + # APM Server hosts to report instrumentation results to. + #hosts: + # - http://localhost:8200 + + # API Key for the APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the APM Server(s). 
+ #secret_token: + + +# ================================= Migration ================================== + +# This allows to enable 6.7 migration aliases +#migration.6_to_7.enabled: true + From 44bdabc302e588bd10904fdc6b3e44577516b24c Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 27 Oct 2020 16:23:22 +0000 Subject: [PATCH 76/93] [CI][flaky] reporting for PRs in GitHub (#21853) --- Jenkinsfile | 8 +++++++- filebeat/Jenkinsfile.yml | 4 +--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 95f270e9e64..ab40d182e81 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -127,7 +127,13 @@ pipeline { runbld(stashedTestReports: stashedTestReports, project: env.REPO) } cleanup { - notifyBuildResult(prComment: true, slackComment: true, slackNotify: (isBranch() || isTag())) + // Required to enable the flaky test reporting with GitHub. Workspace exists since the post/always runs earlier + dir("${BASE_DIR}"){ + // TODO analyzeFlakey does not support other release branches but the master branch. + notifyBuildResult(prComment: true, + slackComment: true, slackNotify: (isBranch() || isTag()), + analyzeFlakey: true, flakyReportIdx: "reporter-beats-beats-master") + } } } } diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index d8ea11c24a5..d2e1d866526 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -67,6 +67,4 @@ stages: labels: - "windows-10" branches: true ## for all the branches - tags: true ## for all the tags - branches: true ## for all the branches - tags: true ## for all the tags + tags: true ## for all the tags \ No newline at end of file From 202c7fe7956f7179ad1fa951a6808abd5870d1a0 Mon Sep 17 00:00:00 2001 From: Luca Belluccini Date: Tue, 27 Oct 2020 18:09:52 +0100 Subject: [PATCH 77/93] [DOCS] Warn about compression and Azure Event Hub for Kafka (#21578) --- libbeat/outputs/kafka/docs/kafka.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/libbeat/outputs/kafka/docs/kafka.asciidoc b/libbeat/outputs/kafka/docs/kafka.asciidoc index 0bd245a4bf2..635bf225556 100644 --- a/libbeat/outputs/kafka/docs/kafka.asciidoc +++ b/libbeat/outputs/kafka/docs/kafka.asciidoc @@ -261,6 +261,12 @@ The keep-alive period for an active network connection. If 0s, keep-alives are d Sets the output compression codec. Must be one of `none`, `snappy`, `lz4` and `gzip`. The default is `gzip`. +[IMPORTANT] +.Known issue with Azure Event Hub for Kafka +==== +When targeting Azure Event Hub for Kafka, set `compression` to `none` as the provided codecs are not supported. +==== + ===== `compression_level` Sets the compression level used by gzip. Setting this value to 0 disables compression. From 601c3237488bfa6bfe6b186e5aa98f4a707c0417 Mon Sep 17 00:00:00 2001 From: Peter Deng Date: Wed, 28 Oct 2020 01:10:29 +0800 Subject: [PATCH 78/93] Update shared-autodiscover.asciidoc (#21827) add descriptions for default values of `labels.dedot` and `annotations.dedot` --- libbeat/docs/shared-autodiscover.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libbeat/docs/shared-autodiscover.asciidoc b/libbeat/docs/shared-autodiscover.asciidoc index df0ea4d2e02..ec1d599ddfb 100644 --- a/libbeat/docs/shared-autodiscover.asciidoc +++ b/libbeat/docs/shared-autodiscover.asciidoc @@ -171,9 +171,10 @@ If the `exclude_labels` config is added to the provider config, then the list of will be excluded from the event. if the `labels.dedot` config is set to be `true` in the provider config, then `.` in labels will be replaced with `_`. 
+By default it is `true`. if the `annotations.dedot` config is set to be `true` in the provider config, then `.` in annotations will be replaced -with `_`. +with `_`. By default it is `true`. For example: From d395a4433ced1f3a5b966b9caeb98812b22f2c6e Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 27 Oct 2020 18:11:00 +0100 Subject: [PATCH 79/93] [Ingest Manager] Agent fix snapshot download for upgrade (#22175) [Ingest Manager] Agent fix snapshot download for upgrade (#22175) --- .../pkg/agent/application/stream.go | 4 +- .../application/upgrade/step_download.go | 58 ++++++++++++++++++- x-pack/elastic-agent/pkg/artifact/config.go | 2 +- .../pkg/artifact/download/fs/verifier.go | 7 ++- .../pkg/artifact/download/http/downloader.go | 12 ++-- .../download/localremote/downloader.go | 6 +- .../artifact/download/localremote/verifier.go | 6 +- .../artifact/download/snapshot/downloader.go | 20 +++++-- .../artifact/download/snapshot/verifier.go | 4 +- 9 files changed, 91 insertions(+), 28 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/application/stream.go b/x-pack/elastic-agent/pkg/agent/application/stream.go index 2d372ef4387..0cdbd99082a 100644 --- a/x-pack/elastic-agent/pkg/agent/application/stream.go +++ b/x-pack/elastic-agent/pkg/agent/application/stream.go @@ -57,9 +57,9 @@ func streamFactory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configur } func newOperator(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, id routingKey, config *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor) (*operation.Operator, error) { - fetcher := downloader.NewDownloader(log, config.DownloadConfig, false) + fetcher := downloader.NewDownloader(log, config.DownloadConfig) allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp, false) + verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) if err != nil { return nil, errors.New(err, "initiating verifier") } diff --git a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go index 0294308ff3a..e96296016a4 100644 --- a/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go +++ b/x-pack/elastic-agent/pkg/agent/application/upgrade/step_download.go @@ -9,7 +9,14 @@ import ( "strings" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download/composed" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download/fs" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download/http" downloader "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download/localremote" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/artifact/download/snapshot" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/release" ) @@ -26,13 +33,16 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } } - allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(u.log, &settings, allowEmptyPgp, pgp, true) + verifier, err := newVerifier(version, u.log, &settings) if err != nil { return "", errors.New(err, "initiating verifier") } - 
fetcher := downloader.NewDownloader(u.log, &settings, true) + fetcher, err := newDownloader(version, u.log, &settings) + if err != nil { + return "", errors.New(err, "initiating fetcher") + } + path, err := fetcher.Download(ctx, agentName, agentArtifactName, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") @@ -48,3 +58,45 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri return path, nil } + +func newDownloader(version string, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { + if !strings.HasSuffix(version, "-SNAPSHOT") { + return downloader.NewDownloader(log, settings), nil + } + + // try snapshot repo before official + snapDownloader, err := snapshot.NewDownloader(settings, version) + if err != nil { + return nil, err + } + + return composed.NewDownloader( + fs.NewDownloader(settings), + snapDownloader, + http.NewDownloader(settings), + ), nil +} + +func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download.Verifier, error) { + allowEmptyPgp, pgp := release.PGP() + if !strings.HasSuffix(version, "-SNAPSHOT") { + return downloader.NewVerifier(log, settings, allowEmptyPgp, pgp) + } + + fsVerifier, err := fs.NewVerifier(settings, allowEmptyPgp, pgp) + if err != nil { + return nil, err + } + + snapshotVerifier, err := snapshot.NewVerifier(settings, allowEmptyPgp, pgp, version) + if err != nil { + return nil, err + } + + remoteVerifier, err := http.NewVerifier(settings, allowEmptyPgp, pgp) + if err != nil { + return nil, err + } + + return composed.NewVerifier(fsVerifier, snapshotVerifier, remoteVerifier), nil +} diff --git a/x-pack/elastic-agent/pkg/artifact/config.go b/x-pack/elastic-agent/pkg/artifact/config.go index 5b0766cb257..81ed3f856fc 100644 --- a/x-pack/elastic-agent/pkg/artifact/config.go +++ b/x-pack/elastic-agent/pkg/artifact/config.go @@ -47,7 +47,7 @@ func DefaultConfig() *Config { return &Config{ SourceURI: "https://artifacts.elastic.co/downloads/", TargetDirectory: filepath.Join(homePath, "downloads"), - Timeout: 30 * time.Second, + Timeout: 120 * time.Second, // binaries are a getting bit larger it might take >30s to download them InstallPath: filepath.Join(homePath, "install"), } } diff --git a/x-pack/elastic-agent/pkg/artifact/download/fs/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/fs/verifier.go index 56652d4f69c..e9e405a5def 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/fs/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/fs/verifier.go @@ -22,7 +22,8 @@ import ( ) const ( - ascSuffix = ".asc" + ascSuffix = ".asc" + sha512Length = 128 ) // Verifier verifies a downloaded package by comparing with public ASC @@ -93,7 +94,9 @@ func (v *Verifier) verifyHash(filename, fullPath string) (bool, error) { continue } - expectedHash = strings.TrimSpace(strings.TrimSuffix(line, filename)) + if len(line) > sha512Length { + expectedHash = strings.TrimSpace(line[:sha512Length]) + } } if expectedHash == "" { diff --git a/x-pack/elastic-agent/pkg/artifact/download/http/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/http/downloader.go index 358b793fccf..4af71eca0db 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/http/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/http/downloader.go @@ -132,6 +132,12 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, 
sourceURI)) } + destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions) + if err != nil { + return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath)) + } + defer destinationFile.Close() + resp, err := e.client.Do(req.WithContext(ctx)) if err != nil { return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) @@ -142,12 +148,6 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f return "", errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } - destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions) - if err != nil { - return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath)) - } - defer destinationFile.Close() - _, err = io.Copy(destinationFile, resp.Body) return fullPath, nil } diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go index ba82195ffbd..6934adc1ea3 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/downloader.go @@ -17,13 +17,13 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(log *logger.Logger, config *artifact.Config, forceSnapshot bool) download.Downloader { +func NewDownloader(log *logger.Logger, config *artifact.Config) download.Downloader { downloaders := make([]download.Downloader, 0, 3) downloaders = append(downloaders, fs.NewDownloader(config)) // try snapshot repo before official - if release.Snapshot() || forceSnapshot { - snapDownloader, err := snapshot.NewDownloader(config) + if release.Snapshot() { + snapDownloader, err := snapshot.NewDownloader(config, "") if err != nil { log.Error(err) } else { diff --git a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go index 30517d12d3d..119121df0cd 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/localremote/verifier.go @@ -17,7 +17,7 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte, forceSnapshot bool) (download.Verifier, error) { +func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { verifiers := make([]download.Verifier, 0, 3) fsVer, err := fs.NewVerifier(config, allowEmptyPgp, pgp) @@ -27,8 +27,8 @@ func NewVerifier(log *logger.Logger, config *artifact.Config, allowEmptyPgp bool verifiers = append(verifiers, fsVer) // try snapshot repo before official - if release.Snapshot() || forceSnapshot { - snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp) + if release.Snapshot() { + snapshotVerifier, err := snapshot.NewVerifier(config, allowEmptyPgp, pgp, "") if err != nil { log.Error(err) } else { diff --git a/x-pack/elastic-agent/pkg/artifact/download/snapshot/downloader.go b/x-pack/elastic-agent/pkg/artifact/download/snapshot/downloader.go index 6f28ad8d926..a5b706a243c 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/snapshot/downloader.go +++ b/x-pack/elastic-agent/pkg/artifact/download/snapshot/downloader.go @@ -18,16 +18,16 @@ import ( // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. -func NewDownloader(config *artifact.Config) (download.Downloader, error) { - cfg, err := snapshotConfig(config) +func NewDownloader(config *artifact.Config, versionOverride string) (download.Downloader, error) { + cfg, err := snapshotConfig(config, versionOverride) if err != nil { return nil, err } return http.NewDownloader(cfg), nil } -func snapshotConfig(config *artifact.Config) (*artifact.Config, error) { - snapshotURI, err := snapshotURI() +func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact.Config, error) { + snapshotURI, err := snapshotURI(versionOverride) if err != nil { return nil, fmt.Errorf("failed to detect remote snapshot repo, proceeding with configured: %v", err) } @@ -43,8 +43,16 @@ func snapshotConfig(config *artifact.Config) (*artifact.Config, error) { }, nil } -func snapshotURI() (string, error) { - artifactsURI := fmt.Sprintf("https://artifacts-api.elastic.co/v1/search/%s-SNAPSHOT/elastic-agent", release.Version()) +func snapshotURI(versionOverride string) (string, error) { + version := release.Version() + if versionOverride != "" { + if strings.HasSuffix(versionOverride, "-SNAPSHOT") { + versionOverride = strings.TrimSuffix(versionOverride, "-SNAPSHOT") + } + version = versionOverride + } + + artifactsURI := fmt.Sprintf("https://artifacts-api.elastic.co/v1/search/%s-SNAPSHOT/elastic-agent", version) resp, err := gohttp.Get(artifactsURI) if err != nil { return "", err diff --git a/x-pack/elastic-agent/pkg/artifact/download/snapshot/verifier.go b/x-pack/elastic-agent/pkg/artifact/download/snapshot/verifier.go index e9d8bbd4dc1..63757880f96 100644 --- a/x-pack/elastic-agent/pkg/artifact/download/snapshot/verifier.go +++ b/x-pack/elastic-agent/pkg/artifact/download/snapshot/verifier.go @@ -12,8 +12,8 @@ import ( // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
-func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (download.Verifier, error) { - cfg, err := snapshotConfig(config) +func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versionOverride string) (download.Verifier, error) { + cfg, err := snapshotConfig(config, versionOverride) if err != nil { return nil, err } From cf7bdba1413d460f932f3f661a986a6f1efae50a Mon Sep 17 00:00:00 2001 From: Alex K <8418476+fearful-symmetry@users.noreply.github.com> Date: Tue, 27 Oct 2020 11:53:01 -0700 Subject: [PATCH 80/93] [Docs] Update custom beat docs (#22194) * update custom beat docs * add changelog * update explanation * Replace make with mage --- CHANGELOG.next.asciidoc | 1 + docs/devguide/create-metricset.asciidoc | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 3022e1a5215..05e9a8e23de 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -188,6 +188,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - The `o365input` and `o365` module now recover from an authentication problem or other fatal errors, instead of terminating. {pull}21259[21258] - Orderly close processors when processing pipelines are not needed anymore to release their resources. {pull}16349[16349] - Fix memory leak and events duplication in docker autodiscover and add_docker_metadata. {pull}21851[21851] +- Fixed documentation for commands in beats dev guide {pull}22194[22194] - Fix parsing of expired licences. {issue}21112[21112] {pull}22180[22180] *Auditbeat* diff --git a/docs/devguide/create-metricset.asciidoc b/docs/devguide/create-metricset.asciidoc index 3331d69d121..db84c3272fd 100644 --- a/docs/devguide/create-metricset.asciidoc +++ b/docs/devguide/create-metricset.asciidoc @@ -32,12 +32,12 @@ The metricset that you created is already a functioning metricset and can be com + [source,bash] ---- -make collect -make +mage update +mage build ---- + -The first command, `make collect`, updates all generated files with the most recent files, data, and meta information from the metricset. The second command, -`make`, compiles your source code and provides you with a binary called metricbeat in the same folder. You can run the +The first command, `mage update`, updates all generated files with the most recent files, data, and meta information from the metricset. The second command, +`mage build`, compiles your source code and provides you with a binary called metricbeat in the same folder. You can run the binary in debug mode with the following command: + [source,bash] @@ -45,7 +45,7 @@ binary in debug mode with the following command: ./metricbeat -e -d "*" ---- -After running the make commands, you'll find the metricset, along with its generated files, under `module/{module}/{metricset}`. This directory +After running the mage commands, you'll find the metricset, along with its generated files, under `module/{module}/{metricset}`. This directory contains the following files: * `\{metricset}.go` From 7c461f8b02abcff7e5361e7eb9817cb979c0091e Mon Sep 17 00:00:00 2001 From: Jaime Soriano Pastor Date: Tue, 27 Oct 2020 20:47:02 +0100 Subject: [PATCH 81/93] Move cloudfoundry tags with metadata to common metadata fields (#22150) In some Cloud Foundry deployments, metadata is included in tags, move these tags to the common fields so add_cloudfoundry_metadata is not needed for these cases. 
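A minimal, self-contained sketch of the new behaviour (not the actual Beats code: the real change works on common.MapStr, nests the keys, and de-dots the remaining tag names; the helper name promoteTags is invented for illustration). Well-known tags are promoted to the common cloudfoundry.* metadata fields, everything else stays under cloudfoundry.tags:

package main

import "fmt"

// promoteTags mirrors the tag handling added in this patch: metadata tags
// become common cloudfoundry.* fields, the remaining tags stay plain tags.
func promoteTags(eventTags map[string]string) (meta, tags map[string]string) {
	meta = map[string]string{}
	tags = map[string]string{}
	known := map[string]string{
		"app_id":            "cloudfoundry.app.id",
		"app_name":          "cloudfoundry.app.name",
		"space_id":          "cloudfoundry.space.id",
		"space_name":        "cloudfoundry.space.name",
		"organization_id":   "cloudfoundry.org.id",
		"organization_name": "cloudfoundry.org.name",
	}
	for name, value := range eventTags {
		if field, ok := known[name]; ok {
			meta[field] = value
			continue
		}
		tags[name] = value
	}
	return meta, tags
}

func main() {
	meta, tags := promoteTags(map[string]string{
		"app_id":     "f47ac10b-58cc-4372-a567-0e02b2c3d479",
		"app_name":   "some-app",
		"custom_tag": "foo",
	})
	fmt.Println(meta) // cloudfoundry.app.id and cloudfoundry.app.name are set
	fmt.Println(tags) // custom_tag remains a plain tag
}

With these fields set on the event itself, the add_cloudfoundry_metadata processor skips its API lookup when the metadata is already present (see hasMetadataFields below).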
--- x-pack/libbeat/common/cloudfoundry/events.go | 46 ++++++++--- .../common/cloudfoundry/events_test.go | 81 +++++++++++++++++++ .../add_cloudfoundry_metadata.go | 22 +++++ .../add_cloudfoundry_metadata_test.go | 56 +++++++++++++ .../docs/add_cloudfoundry_metadata.asciidoc | 5 ++ .../cloudfoundry/container/_meta/data.json | 37 ++++++--- 6 files changed, 224 insertions(+), 23 deletions(-) diff --git a/x-pack/libbeat/common/cloudfoundry/events.go b/x-pack/libbeat/common/cloudfoundry/events.go index 3a0f1756b6e..adaa944773c 100644 --- a/x-pack/libbeat/common/cloudfoundry/events.go +++ b/x-pack/libbeat/common/cloudfoundry/events.go @@ -490,21 +490,45 @@ func envelopMap(evt Event) common.MapStr { } func baseMap(evt Event) common.MapStr { - return common.MapStr{ - "cloudfoundry": common.MapStr{ - "type": evt.String(), - "envelope": envelopMap(evt), - "tags": dedotedTags(evt.Tags()), - }, + tags, meta := tagsToMeta(evt.Tags()) + cf := common.MapStr{ + "type": evt.String(), + "envelope": envelopMap(evt), + } + if len(tags) > 0 { + cf["tags"] = tags + } + result := common.MapStr{ + "cloudfoundry": cf, } + if len(meta) > 0 { + result.DeepUpdate(meta) + } + return result } -func dedotedTags(tags map[string]string) common.MapStr { - result := common.MapStr{} - for name, value := range tags { - result[common.DeDot(name)] = value +func tagsToMeta(eventTags map[string]string) (tags common.MapStr, meta common.MapStr) { + tags = common.MapStr{} + meta = common.MapStr{} + for name, value := range eventTags { + switch name { + case "app_id": + meta.Put("cloudfoundry.app.id", value) + case "app_name": + meta.Put("cloudfoundry.app.name", value) + case "space_id": + meta.Put("cloudfoundry.space.id", value) + case "space_name": + meta.Put("cloudfoundry.space.name", value) + case "organization_id": + meta.Put("cloudfoundry.org.id", value) + case "organization_name": + meta.Put("cloudfoundry.org.name", value) + default: + tags[common.DeDot(name)] = value + } } - return result + return tags, meta } func baseMapWithApp(evt EventWithAppID) common.MapStr { diff --git a/x-pack/libbeat/common/cloudfoundry/events_test.go b/x-pack/libbeat/common/cloudfoundry/events_test.go index 7dfd9bdcbd7..af924b33ba3 100644 --- a/x-pack/libbeat/common/cloudfoundry/events_test.go +++ b/x-pack/libbeat/common/cloudfoundry/events_test.go @@ -382,6 +382,87 @@ func TestEventError(t *testing.T) { }, evt.ToFields()) } +func TestEventTagsWithMetadata(t *testing.T) { + eventType := events.Envelope_LogMessage + message := "log message" + messageType := events.LogMessage_OUT + timestamp := int64(1587469726082) + appID := "f47ac10b-58cc-4372-a567-0e02b2c3d479" + sourceType := "source_type" + sourceInstance := "source_instance" + cfEvt := makeEnvelope(&eventType) + tags := map[string]string{ + "app_id": appID, + "app_name": "some-app", + "space_id": "e1114e92-155c-11eb-ada9-27b81025a657", + "space_name": "some-space", + "organization_id": "baeef1ba-155c-11eb-a1af-8f14964c35d2", + "organization_name": "some-org", + "custom_tag": "foo", + } + cfEvt.Tags = tags + cfEvt.LogMessage = &events.LogMessage{ + Message: []byte(message), + MessageType: &messageType, + Timestamp: ×tamp, + AppId: &appID, + SourceType: &sourceType, + SourceInstance: &sourceInstance, + } + evt := newEventLog(cfEvt) + + assert.Equal(t, EventTypeLog, evt.EventType()) + assert.Equal(t, "log", evt.String()) + assert.Equal(t, "origin", evt.Origin()) + assert.Equal(t, time.Unix(0, 1587469726082), evt.Timestamp()) + assert.Equal(t, "deployment", evt.Deployment()) + assert.Equal(t, "job", 
evt.Job()) + assert.Equal(t, "index", evt.Index()) + assert.Equal(t, "ip", evt.IP()) + assert.Equal(t, tags, evt.Tags()) + assert.Equal(t, "f47ac10b-58cc-4372-a567-0e02b2c3d479", evt.AppGuid()) + assert.Equal(t, "log message", evt.Message()) + assert.Equal(t, EventLogMessageTypeStdout, evt.MessageType()) + assert.Equal(t, "source_type", evt.SourceType()) + assert.Equal(t, "source_instance", evt.SourceID()) + + assert.Equal(t, common.MapStr{ + "cloudfoundry": common.MapStr{ + "type": "log", + "log": common.MapStr{ + "source": common.MapStr{ + "instance": evt.SourceID(), + "type": evt.SourceType(), + }, + }, + "envelope": common.MapStr{ + "origin": "origin", + "deployment": "deployment", + "ip": "ip", + "job": "job", + "index": "index", + }, + "app": common.MapStr{ + "id": "f47ac10b-58cc-4372-a567-0e02b2c3d479", + "name": "some-app", + }, + "space": common.MapStr{ + "id": "e1114e92-155c-11eb-ada9-27b81025a657", + "name": "some-space", + }, + "org": common.MapStr{ + "id": "baeef1ba-155c-11eb-a1af-8f14964c35d2", + "name": "some-org", + }, + "tags": common.MapStr{ + "custom_tag": "foo", + }, + }, + "message": "log message", + "stream": "stdout", + }, evt.ToFields()) +} + func makeEnvelope(eventType *events.Envelope_EventType) *events.Envelope { timestamp := int64(1587469726082) origin := "origin" diff --git a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go index a6b8bd16566..50f551af969 100644 --- a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go +++ b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go @@ -69,6 +69,10 @@ func (d *addCloudFoundryMetadata) Run(event *beat.Event) (*beat.Event, error) { // wrong type or not set return event, nil } + if hasMetadataFields(event) { + // nothing to do, fields already present + return event, nil + } app, err := d.client.GetAppByGuid(val) if err != nil { d.log.Debugf("failed to get application info for GUID(%s): %v", val, err) @@ -108,3 +112,21 @@ func (d *addCloudFoundryMetadata) Close() error { } return nil } + +var metadataFields = []string{ + "cloudfoundry.app.id", + "cloudfoundry.app.name", + "cloudfoundry.space.id", + "cloudfoundry.space.name", + "cloudfoundry.org.id", + "cloudfoundry.org.name", +} + +func hasMetadataFields(event *beat.Event) bool { + for _, name := range metadataFields { + if value, err := event.GetValue(name); value == "" || err != nil { + return false + } + } + return true +} diff --git a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go index 95a7073321e..34fe866104d 100644 --- a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go +++ b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go @@ -82,6 +82,62 @@ func TestCFAppNotFound(t *testing.T) { assert.Equal(t, evt, *observed) } +func TestCFAppMetadataAlreadyPresent(t *testing.T) { + guid := mustCreateFakeGuid() + app := cloudfoundry.AppMeta{ + Guid: guid, + Name: "My Fake App", + SpaceGuid: mustCreateFakeGuid(), + SpaceName: "My Fake Space", + OrgGuid: mustCreateFakeGuid(), + OrgName: "My Fake Org", + } + p := addCloudFoundryMetadata{ + log: logp.NewLogger("add_cloudfoundry_metadata"), + client: &fakeClient{app}, + } + + evt := beat.Event{ + Fields: common.MapStr{ + "cloudfoundry": common.MapStr{ + "app": 
common.MapStr{ + "id": guid, + "name": "Other App Name", + }, + "space": common.MapStr{ + "id": app.SpaceGuid, + "name": app.SpaceName, + }, + "org": common.MapStr{ + "id": app.OrgGuid, + "name": app.OrgName, + }, + }, + }, + } + expected := beat.Event{ + Fields: common.MapStr{ + "cloudfoundry": common.MapStr{ + "app": common.MapStr{ + "id": guid, + "name": "Other App Name", + }, + "space": common.MapStr{ + "id": app.SpaceGuid, + "name": app.SpaceName, + }, + "org": common.MapStr{ + "id": app.OrgGuid, + "name": app.OrgName, + }, + }, + }, + } + observed, err := p.Run(&evt) + assert.NoError(t, err) + assert.Equal(t, expected, *observed) +} + func TestCFAppUpdated(t *testing.T) { guid := mustCreateFakeGuid() app := cloudfoundry.AppMeta{ diff --git a/x-pack/libbeat/processors/add_cloudfoundry_metadata/docs/add_cloudfoundry_metadata.asciidoc b/x-pack/libbeat/processors/add_cloudfoundry_metadata/docs/add_cloudfoundry_metadata.asciidoc index 67e89c8173b..e5123ae75a0 100644 --- a/x-pack/libbeat/processors/add_cloudfoundry_metadata/docs/add_cloudfoundry_metadata.asciidoc +++ b/x-pack/libbeat/processors/add_cloudfoundry_metadata/docs/add_cloudfoundry_metadata.asciidoc @@ -20,6 +20,11 @@ Each event is annotated with: * Organization ID * Organization Name +NOTE: Pivotal Application Service and Tanzu Application Service include this +metadata in all events from the firehose since version 2.8. In these cases the +metadata in the events is used, and `add_cloudfoundry_metadata` processor +doesn't modify these fields. + [source,yaml] ------------------------------------------------------------------------------- diff --git a/x-pack/metricbeat/module/cloudfoundry/container/_meta/data.json b/x-pack/metricbeat/module/cloudfoundry/container/_meta/data.json index 16a4f1ef128..57d788406ef 100644 --- a/x-pack/metricbeat/module/cloudfoundry/container/_meta/data.json +++ b/x-pack/metricbeat/module/cloudfoundry/container/_meta/data.json @@ -2,26 +2,39 @@ "@timestamp": "2017-10-12T08:05:34.853Z", "cloudfoundry": { "app": { - "id": "3ce55e14-de73-49af-836d-adc93f3fee39" + "id": "8d165a12-fbd8-40cb-b71a-5bc6086df04c", + "name": "log-gen" }, "container": { - "cpu.pct": 0.19431789913648675, - "disk.bytes": 16678912, - "disk.quota.bytes": 33554432, - "instance_index": 0, - "memory.bytes": 8529920, - "memory.quota.bytes": 33554432 + "cpu.pct": 4.231873716293137, + "disk.bytes": 122691584, + "disk.quota.bytes": 1073741824, + "instance_index": 3, + "memory.bytes": 52250065, + "memory.quota.bytes": 1073741824 }, "envelope": { - "deployment": "cf-6b7aee31c8d07637ad78", - "index": "c2bcf5d6-7ff9-4876-890f-6f8fc6c58668", - "ip": "192.168.16.51", + "deployment": "cf-9c11cd01425665e2ed6d", + "index": "9e1a45be-f8a4-44ef-9c34-22f6e51ce4c7", + "ip": "192.168.16.37", "job": "diego_cell", "origin": "rep" }, + "org": { + "id": "4af89198-dd33-4542-9915-f489542bc058", + "name": "elastic-logging-org" + }, + "space": { + "id": "10ed1559-e399-4034-babf-6424ef888dc1", + "name": "logging-space" + }, "tags": { - "product": "Pivotal Application Service", - "source_id": "3ce55e14-de73-49af-836d-adc93f3fee39" + "instance_id": "3", + "process_id": "8d165a12-fbd8-40cb-b71a-5bc6086df04c", + "process_instance_id": "75568c86-b9e0-4330-4784-928b", + "process_type": "web", + "product": "VMware Tanzu Application Service", + "source_id": "8d165a12-fbd8-40cb-b71a-5bc6086df04c" }, "type": "container" }, From 46fc975b2921ddf1aafa7a7db7188e2de85326b0 Mon Sep 17 00:00:00 2001 From: Ivan Fernandez Calvo Date: Wed, 28 Oct 2020 13:09:12 +0100 Subject: [PATCH 
82/93] ci: improve linting speed (#22103) * ci: improve linting speed * fix: use multiple agents for the matrix * fix: add python-env * fix: set make folder parameter * feat: move k8s check and dev-tools to their own stages * fix: lint in a single stage is faster * fix: suggested changes * chore:change triggers * chore: update for packetbeat --- Jenkinsfile | 10 ++++++---- Jenkinsfile.yml | 3 ++- Makefile | 25 ++++++++++++++++++++----- auditbeat/Jenkinsfile.yml | 5 +++++ deploy/kubernetes/Jenkinsfile.yml | 3 +++ dev-tools/Jenkinsfile.yml | 16 ++++++++++++++++ filebeat/Jenkinsfile.yml | 5 +++++ heartbeat/Jenkinsfile.yml | 6 +++++- journalbeat/Jenkinsfile.yml | 5 +++++ libbeat/Jenkinsfile.yml | 5 +++++ metricbeat/Jenkinsfile.yml | 5 +++++ packetbeat/Jenkinsfile.yml | 5 +++++ winlogbeat/Jenkinsfile.yml | 5 +++++ x-pack/auditbeat/Jenkinsfile.yml | 5 +++++ x-pack/dockerlogbeat/Jenkinsfile.yml | 5 +++++ x-pack/elastic-agent/Jenkinsfile.yml | 5 +++++ x-pack/filebeat/Jenkinsfile.yml | 5 +++++ x-pack/functionbeat/Jenkinsfile.yml | 5 +++++ x-pack/libbeat/Jenkinsfile.yml | 5 +++++ x-pack/metricbeat/Jenkinsfile.yml | 5 +++++ x-pack/packetbeat/Jenkinsfile.yml | 5 +++++ x-pack/winlogbeat/Jenkinsfile.yml | 7 +++++++ 22 files changed, 134 insertions(+), 11 deletions(-) create mode 100644 dev-tools/Jenkinsfile.yml diff --git a/Jenkinsfile b/Jenkinsfile index ab40d182e81..7378612a7f7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -74,10 +74,12 @@ pipeline { GOFLAGS = '-mod=readonly' } steps { - withGithubNotify(context: 'Lint') { - withBeatsEnv(archive: false, id: 'lint') { + withGithubNotify(context: "Lint") { + withBeatsEnv(archive: false, id: "lint") { dumpVariables() - cmd(label: 'make check', script: 'make check') + cmd(label: "make check-python", script: "make check-python") + cmd(label: "make check-go", script: "make check-go") + cmd(label: "Check for changes", script: "make check-no-changes") } } } @@ -496,7 +498,7 @@ def terraformApply(String directory) { } /** -* Tear down the terraform environments, by looking for all terraform states in directory +* Tear down the terraform environments, by looking for all terraform states in directory * then it runs terraform destroy for each one. * It uses terraform states previously stashed by startCloudTestEnv. */ diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index f7b21e1cbdf..3edd9a75d5c 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -17,6 +17,7 @@ projects: - "x-pack/libbeat" - "x-pack/metricbeat" - "x-pack/winlogbeat" + - "dev-tools" ##- "x-pack/heartbeat" It's not yet in the 1.0 pipeline. ##- "x-pack/journalbeat" It's not yet in the 1.0 pipeline. ##- "x-pack/packetbeat" It's not yet in the 1.0 pipeline. @@ -45,4 +46,4 @@ disabled: labels: ## Skip the GitHub Pull Request builds if any of the given GitHub labels match with the assigned labels in the PR. - skip-ci ## TODO: This will allow to configure what to do based on the PR configuration - draft: true ## Skip the GitHub Pull Request builds with Draft PRs. \ No newline at end of file + draft: true ## Skip the GitHub Pull Request builds with Draft PRs. diff --git a/Makefile b/Makefile index 84ac9bb6e1a..b00cded4f9f 100644 --- a/Makefile +++ b/Makefile @@ -94,20 +94,35 @@ clean: mage ## check : TBD. .PHONY: check -check: python-env +check: @$(foreach var,$(PROJECTS) dev-tools $(PROJECTS_XPACK_MAGE),$(MAKE) -C $(var) check || exit 1;) - @$(FIND) -name *.py -name *.py -not -path "*/build/*" -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120 {} \; | (! grep . 
-q) || (echo "Code differs from autopep8's style" && false) - @$(FIND) -name *.py -not -path "*/build/*" | xargs $(PYTHON_ENV)/bin/pylint --py3k -E || (echo "Code is not compatible with Python 3" && false) + $(MAKE) check-python # check if vendor folder does not exists [ ! -d vendor ] - @# Validate that all updates were committed + # Validate that all updates were committed @$(MAKE) update @$(MAKE) check-headers - go mod tidy + @$(MAKE) check-go + @$(MAKE) check-no-changes + +## ccheck-go : Check there is no changes in Go modules. +.PHONY: check-go +check-go: + @go mod tidy + +## ccheck-no-changes : Check there is no local changes. +.PHONY: check-no-changes +check-no-changes: @git diff | cat @git update-index --refresh @git diff-index --exit-code HEAD -- +## check-python : Python Linting. +.PHONY: check-python +check-python: python-env + @$(FIND) -name *.py -name *.py -not -path "*/build/*" -exec $(PYTHON_ENV)/bin/autopep8 -d --max-line-length 120 {} \; | (! grep . -q) || (echo "Code differs from autopep8's style" && false) + @$(FIND) -name *.py -not -path "*/build/*" | xargs $(PYTHON_ENV)/bin/pylint --py3k -E || (echo "Code is not compatible with Python 3" && false) + ## check-headers : Check the license headers. .PHONY: check-headers check-headers: mage diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index c68b5689f48..be9663b8e8d 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C auditbeat check; + make -C auditbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/deploy/kubernetes/Jenkinsfile.yml b/deploy/kubernetes/Jenkinsfile.yml index 452771edfb5..e75e2f54c10 100644 --- a/deploy/kubernetes/Jenkinsfile.yml +++ b/deploy/kubernetes/Jenkinsfile.yml @@ -2,6 +2,7 @@ when: branches: true ## for all the branches changeset: ## when PR contains any of those entries in the changeset - "^deploy/kubernetes/.*" + - "^libbeat/docs/version.asciidoc" comments: ## when PR comment contains any of those entries - "/test deploy/kubernetes" labels: ## when PR labels matches any of those entries @@ -11,5 +12,7 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + lint: + make: "make -C deploy/kubernetes all" k8sTest: k8sTest: "v1.18.2,v1.17.2,v1.16.4,v1.15.7,v1.14.10" diff --git a/dev-tools/Jenkinsfile.yml b/dev-tools/Jenkinsfile.yml new file mode 100644 index 00000000000..28ead9ec813 --- /dev/null +++ b/dev-tools/Jenkinsfile.yml @@ -0,0 +1,16 @@ +when: + branches: true ## for all the branches + changeset: ## when PR contains any of those entries in the changeset + - "^dev-tools/.*" + - "^libbeat/scripts/Makefile" + comments: ## when PR comment contains any of those entries + - "/test dev-tools" + labels: ## when PR labels matches any of those entries + - "dev-tools" + parameters: ## when parameter was selected in the UI. 
+ - "dev-tools" + tags: true ## for all the tags +platform: "linux && ubuntu-18" ## default label for all the stages +stages: + lint: + make: "make -C dev-tools check" diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index d2e1d866526..f13f0329345 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C filebeat check; + make -C filebeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml index 032ec411892..6135df2d26f 100644 --- a/heartbeat/Jenkinsfile.yml +++ b/heartbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C heartbeat check; + make -C heartbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. @@ -78,4 +83,3 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags - diff --git a/journalbeat/Jenkinsfile.yml b/journalbeat/Jenkinsfile.yml index 5715712dd4a..9358d6c5197 100644 --- a/journalbeat/Jenkinsfile.yml +++ b/journalbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C journalbeat check; + make -C journalbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/libbeat/Jenkinsfile.yml b/libbeat/Jenkinsfile.yml index 692400e7253..0a59f7d4406 100644 --- a/libbeat/Jenkinsfile.yml +++ b/libbeat/Jenkinsfile.yml @@ -12,6 +12,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C libbeat check; + make -C libbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml index e6c4ffcef0e..993d0681469 100644 --- a/metricbeat/Jenkinsfile.yml +++ b/metricbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C metricbeat check; + make -C metricbeat update; + make check-no-changes; unitTest: mage: "mage build unitTest" goIntegTest: diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml index ef373bb5f15..e956b7ddc6b 100644 --- a/packetbeat/Jenkinsfile.yml +++ b/packetbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C packetbeat check; + make -C packetbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. 
diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index 3b9c71bf0c3..a7620c47138 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C winlogbeat check; + make -C winlogbeat update; + make check-no-changes; crosscompile: make: "make -C winlogbeat crosscompile" windows: diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml index f4e55ea6372..a65ccfeeecd 100644 --- a/x-pack/auditbeat/Jenkinsfile.yml +++ b/x-pack/auditbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/auditbeat check; + make -C x-pack/auditbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/x-pack/dockerlogbeat/Jenkinsfile.yml b/x-pack/dockerlogbeat/Jenkinsfile.yml index 703bb3d66a9..1eed20f80ed 100644 --- a/x-pack/dockerlogbeat/Jenkinsfile.yml +++ b/x-pack/dockerlogbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/dockerlogbeat check; + make -C x-pack/dockerlogbeat update; + make check-no-changes; build: mage: "mage build test" withModule: true ## run the ITs only if the changeset affects a specific module. diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index d324e3381af..d1e415a6e78 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/elastic-agent check; + make -C x-pack/elastic-agent update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index d28520b7c32..460115bbe41 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/filebeat check; + make -C x-pack/filebeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml index 117d92e3179..5d8cf74c480 100644 --- a/x-pack/functionbeat/Jenkinsfile.yml +++ b/x-pack/functionbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/functionbeat check; + make -C x-pack/functionbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. 
diff --git a/x-pack/libbeat/Jenkinsfile.yml b/x-pack/libbeat/Jenkinsfile.yml index ed22a8dfe70..9e5c382072d 100644 --- a/x-pack/libbeat/Jenkinsfile.yml +++ b/x-pack/libbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/libbeat check; + make -C x-pack/libbeat update; + make check-no-changes; arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 8506eb12e69..2fe5da311fa 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/metricbeat check; + make -C x-pack/metricbeat update; + make check-no-changes; build: cloud: "mage build test" withModule: true ## run the ITs only if the changeset affects a specific module. diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index 0ae3857acf6..baeb03a5e31 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -13,6 +13,11 @@ when: tags: true ## for all the tags platform: "linux && ubuntu-18" ## default label for all the stages stages: + Lint: + mage: | + mage check; + mage update; + make: "make check-no-changes" arm: mage: "mage build unitTest" platforms: ## override default label in this specific stage. diff --git a/x-pack/winlogbeat/Jenkinsfile.yml b/x-pack/winlogbeat/Jenkinsfile.yml index 45dfcad9d45..f95e44395e1 100644 --- a/x-pack/winlogbeat/Jenkinsfile.yml +++ b/x-pack/winlogbeat/Jenkinsfile.yml @@ -13,6 +13,13 @@ when: tags: true ## for all the tags platform: "windows-2019" ## default label for all the stages stages: + Lint: + make: | + make -C x-pack/winlogbeat check; + make -C x-pack/winlogbeat update; + make check-no-changes; + platforms: ## override default labels in this specific stage. + - "linux && ubuntu-18" build: mage: "mage build unitTest" withModule: true From 36775b2f909942bfd54cc8aa2aa809186f04c3a4 Mon Sep 17 00:00:00 2001 From: Mariana Dima Date: Wed, 28 Oct 2020 13:35:57 +0000 Subject: [PATCH 83/93] Perfmon - Fix regular expressions to comply to multiple parentheses in instance name and object (#22146) * mofidy doc * work on instance * changelog * fix tests --- CHANGELOG.next.asciidoc | 1 + .../helper/windows/pdh/pdh_query_windows.go | 44 +++++++++++-- .../windows/pdh/pdh_query_windows_test.go | 66 ++++++++++++++++++- metricbeat/module/windows/perfmon/data.go | 4 +- .../module/windows/perfmon/data_test.go | 6 +- 5 files changed, 111 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 05e9a8e23de..e58f904535c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -379,6 +379,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix retrieving resources by ID for the azure module. {pull}21711[21711] {issue}21707[21707] - Use timestamp from CloudWatch API when creating events. {pull}21498[21498] - Report the correct windows events for system/filesystem {pull}21758[21758] +- Fix regular expression in windows/permfon. {pull}22146[22146] {issue}21125[21125] - Fix azure storage event format. 
{pull}21845[21845] - Fix panic in kubernetes autodiscover related to keystores {issue}21843[21843] {pull}21880[21880] - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] diff --git a/metricbeat/helper/windows/pdh/pdh_query_windows.go b/metricbeat/helper/windows/pdh/pdh_query_windows.go index 3c51df5073a..8cc3a46edc8 100644 --- a/metricbeat/helper/windows/pdh/pdh_query_windows.go +++ b/metricbeat/helper/windows/pdh/pdh_query_windows.go @@ -30,7 +30,7 @@ import ( ) var ( - instanceNameRegexp = regexp.MustCompile(`.*?\((.*?)\).*`) + instanceNameRegexp = regexp.MustCompile(`(\(.+\))\\`) objectNameRegexp = regexp.MustCompile(`(?:^\\\\[^\\]+\\|^\\)([^\\]+)`) ) @@ -86,7 +86,7 @@ func (q *Query) AddCounter(counterPath string, instance string, format string, w var instanceName string // Extract the instance name from the counterPath. if instance == "" || wildcard { - instanceName, err = MatchInstanceName(counterPath) + instanceName, err = matchInstanceName(counterPath) if err != nil { return err } @@ -233,18 +233,50 @@ func (q *Query) Close() error { return PdhCloseQuery(q.Handle) } -// MatchInstanceName will check first for instance and then for any objects names. -func MatchInstanceName(counterPath string) (string, error) { +// matchInstanceName will check first for instance and then for any objects names. +func matchInstanceName(counterPath string) (string, error) { matches := instanceNameRegexp.FindStringSubmatch(counterPath) - if len(matches) != 2 { - matches = objectNameRegexp.FindStringSubmatch(counterPath) + if len(matches) == 2 { + return returnLastInstance(matches[1]), nil } + matches = objectNameRegexp.FindStringSubmatch(counterPath) if len(matches) == 2 { return matches[1], nil } return "", errors.New("query doesn't contain an instance name. In this case you have to define 'instance_name'") } +// returnLastInstance will return the content from the last parentheses, this covers cases as `\WF (System.Workflow) 4.0.0.0(*)\Workflows Created`. 
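+// For example (taken from TestReturnLastInstance in this patch), the instance expression
+// `(test this scenario) per processor network interface card activity(3, microsoft wi-fi directvirtual (gyfyg) adapter #2)`
+// yields `3, microsoft wi-fi directvirtual (gyfyg) adapter #2`, i.e. the content of the last top-level parentheses.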
+func returnLastInstance(match string) string { + var openedParanth int + var innerMatch string + var matches []string + runeMatch := []rune(match) + for i := 0; i < len(runeMatch); i++ { + char := string(runeMatch[i]) + + // check if string ends between parentheses + if char == ")" { + openedParanth -= 1 + } + if openedParanth > 0 { + innerMatch += char + } + if openedParanth == 0 && innerMatch != "" { + matches = append(matches, innerMatch) + innerMatch = "" + } + // check if string starts between parentheses + if char == "(" { + openedParanth += 1 + } + } + if len(matches) > 0 { + return matches[len(matches)-1] + } + return match +} + // getCounterValue will retrieve the counter value based on the format applied in the config options func getCounterValue(counter *Counter) CounterValue { counterValue := CounterValue{Instance: counter.instanceName, Err: CounterValueError{CStatus: 0}} diff --git a/metricbeat/helper/windows/pdh/pdh_query_windows_test.go b/metricbeat/helper/windows/pdh/pdh_query_windows_test.go index 20d894d3924..3a09d6d77b5 100644 --- a/metricbeat/helper/windows/pdh/pdh_query_windows_test.go +++ b/metricbeat/helper/windows/pdh/pdh_query_windows_test.go @@ -89,6 +89,48 @@ func TestSuccessfulQuery(t *testing.T) { assert.NotNil(t, list) } +func TestMatchInstanceName(t *testing.T) { + query := "\\SQLServer:Databases(*)\\Log File(s) Used Size (KB)" + match, err := matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "*") + + query = " \\\\desktop-rfooe09\\per processor network interface card activity(3, microsoft wi-fi directvirtual (gyfyg) adapter #2)\\dpcs queued/sec" + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "3, microsoft wi-fi directvirtual (gyfyg) adapter #2") + + query = " \\\\desktop-rfooe09\\ (test this scenario) per processor network interface card activity(3, microsoft wi-fi directvirtual (gyfyg) adapter #2)\\dpcs queued/sec" + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "3, microsoft wi-fi directvirtual (gyfyg) adapter #2") + + query = "\\RAS\\Bytes Received By Disconnected Clients" + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "RAS") + + query = `\\Process (chrome.exe#4)\\Bytes Received By Disconnected Clients` + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "chrome.exe#4") + + query = "\\BranchCache\\Local Cache: Cache complete file segments" + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "BranchCache") + + query = `\Synchronization(*)\Exec. Resource no-Waits AcqShrdStarveExcl/sec` + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "*") + + query = `\.NET CLR Exceptions(test hellp (dsdsd) #rfsfs #3)\# of Finallys / sec` + match, err = matchInstanceName(query) + assert.NoError(t, err) + assert.Equal(t, match, "test hellp (dsdsd) #rfsfs #3") +} + // TestInstanceNameRegexp tests regular expression for instance. 
func TestInstanceNameRegexp(t *testing.T) { queryPaths := []string{`\SQLServer:Databases(*)\Log File(s) Used Size (KB)`, `\Search Indexer(*)\L0 Indexes (Wordlists)`, @@ -96,7 +138,7 @@ func TestInstanceNameRegexp(t *testing.T) { for _, path := range queryPaths { matches := instanceNameRegexp.FindStringSubmatch(path) if assert.Len(t, matches, 2, "regular expression did not return any matches") { - assert.Equal(t, matches[1], "*") + assert.Equal(t, matches[1], "(*)") } } } @@ -114,6 +156,28 @@ func TestObjectNameRegexp(t *testing.T) { } } +func TestReturnLastInstance(t *testing.T) { + query := "(*)" + match := returnLastInstance(query) + assert.Equal(t, match, "*") + + query = "(3, microsoft wi-fi directvirtual (gyfyg) adapter #2)" + match = returnLastInstance(query) + assert.Equal(t, match, "3, microsoft wi-fi directvirtual (gyfyg) adapter #2") + + query = "(test this scenario) per processor network interface card activity(3, microsoft wi-fi directvirtual (gyfyg) adapter #2)" + match = returnLastInstance(query) + assert.Equal(t, match, "3, microsoft wi-fi directvirtual (gyfyg) adapter #2") + + query = `(chrome.exe#4)` + match = returnLastInstance(query) + assert.Equal(t, match, "chrome.exe#4") + + query = `(test hellp (dsdsd) #rfsfs #3)` + match = returnLastInstance(query) + assert.Equal(t, match, "test hellp (dsdsd) #rfsfs #3") +} + func TestUTF16ToStringArray(t *testing.T) { var array = []string{"\\\\DESKTOP-RFOOE09\\Physikalischer Datenträger(0 C:)\\Schreibvorgänge/s", "\\\\DESKTOP-RFOOE09\\Physikalischer Datenträger(_Total)\\Schreibvorgänge/s", ""} var unicode []uint16 diff --git a/metricbeat/module/windows/perfmon/data.go b/metricbeat/module/windows/perfmon/data.go index 833c5757220..6e4e9c6ef0a 100644 --- a/metricbeat/module/windows/perfmon/data.go +++ b/metricbeat/module/windows/perfmon/data.go @@ -33,7 +33,7 @@ import ( "github.com/elastic/beats/v7/metricbeat/mb" ) -var processRegexp = regexp.MustCompile(`(.+?)#[1-9]+`) +var processRegexp = regexp.MustCompile(`(.+?[^\s])(?:#\d+|$)`) func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Event { eventMap := make(map[string]*mb.Event) @@ -73,7 +73,7 @@ func (re *Reader) groupToEvents(counters map[string][]pdh.CounterValue) []mb.Eve Error: errors.Wrapf(val.Err.Error, "failed on query=%v", counterPath), } if val.Instance != "" { - //will ignore instance counter + //will ignore instance index if ok, match := matchesParentProcess(val.Instance); ok { eventMap[eventKey].MetricSetFields.Put(counter.InstanceField, match) } else { diff --git a/metricbeat/module/windows/perfmon/data_test.go b/metricbeat/module/windows/perfmon/data_test.go index df23d1667ff..7203963d2cc 100644 --- a/metricbeat/module/windows/perfmon/data_test.go +++ b/metricbeat/module/windows/perfmon/data_test.go @@ -159,9 +159,13 @@ func TestGroupToSingleEvent(t *testing.T) { func TestMatchesParentProcess(t *testing.T) { ok, val := matchesParentProcess("svchost") - assert.False(t, ok) + assert.True(t, ok) assert.Equal(t, val, "svchost") ok, val = matchesParentProcess("svchost#54") assert.True(t, ok) assert.Equal(t, val, "svchost") + + ok, val = matchesParentProcess("svchost (test) another #54") + assert.True(t, ok) + assert.Equal(t, val, "svchost (test) another #54") } From 577c25b312866d9e081eac9600b33fda597e4fdc Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 28 Oct 2020 14:12:50 +0000 Subject: [PATCH 84/93] [CI][flaky] Support 7.x branches and PRs (#22197) --- Jenkinsfile | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 
deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7378612a7f7..d12848b28cc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -131,15 +131,41 @@ pipeline { cleanup { // Required to enable the flaky test reporting with GitHub. Workspace exists since the post/always runs earlier dir("${BASE_DIR}"){ - // TODO analyzeFlakey does not support other release branches but the master branch. notifyBuildResult(prComment: true, slackComment: true, slackNotify: (isBranch() || isTag()), - analyzeFlakey: true, flakyReportIdx: "reporter-beats-beats-master") + analyzeFlakey: !isTag(), flakyReportIdx: "reporter-beats-beats-${getIdSuffix()}") } } } } +/** +* There are only two supported branches, master and 7.x +*/ +def getIdSuffix() { + if(isPR()) { + return getBranchIndice(env.CHANGE_TARGET) + } + if(isBranch()) { + return getBranchIndice(env.BRANCH_NAME) + } +} + +/** +* There are only two supported branches, master and 7.x +*/ +def getBranchIndice(String compare) { + if (compare?.equals('master') || compare.equals('7.x')) { + return compare + } else { + if (compare.startsWith('7.')) { + return '7.x' + } + } + return 'master' +} + + /** * This method is the one used for running the parallel stages, therefore * its arguments are passed by the beatsStages step. From 310dbf6273bb7cf52e89a3eb1804e621d69d1fe8 Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Wed, 28 Oct 2020 09:53:54 -0500 Subject: [PATCH 85/93] Add pe fields to Sysmon module (#22217) - ProcessCreate event - LoadImage event Closes #17335 --- CHANGELOG.next.asciidoc | 1 + .../module/sysmon/config/winlogbeat-sysmon.js | 48 +++++++++ .../test/testdata/sysmon-12-loadimage.evtx | Bin 0 -> 69632 bytes .../sysmon-12-loadimage.evtx.golden.json | 94 +++++++++++++++++ .../testdata/sysmon-12-processcreate.evtx | Bin 0 -> 69632 bytes .../sysmon-12-processcreate.evtx.golden.json | 96 ++++++++++++++++++ .../testdata/sysmon-9.01.evtx.golden.json | 18 ++++ 7 files changed, 257 insertions(+) create mode 100644 x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx create mode 100644 x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx.golden.json create mode 100644 x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx create mode 100644 x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx.golden.json diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e58f904535c..874fa29264a 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -800,6 +800,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add registry and code signature information and ECS categorization fields for sysmon module {pull}18058[18058] - Add new winlogbeat security dashboard {pull}18775[18775] - Add `event.outcome` to events based on the audit success and audit failure keywords. {pull}20564[20564] +- Add file.pe and process.pe fields to ProcessCreate & LoadImage events in Sysmon module. 
{issue}17335[17335] {pull}22217[22217] *Elastic Log Driver* - Add support for `docker logs` command {pull}19531[19531] diff --git a/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js b/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js index 5b09c98fc32..d5921722210 100644 --- a/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js +++ b/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js @@ -677,11 +677,37 @@ var sysmon = (function () { from: "winlog.event_data.ParentCommandLine", to: "process.parent.command_line", }, + { + from: "winlog.event_data.OriginalFileName", + to: "process.pe.original_file_name", + }, ], mode: "rename", ignore_missing: true, fail_on_error: false, }) + .Convert({ + fields: [{ + from: "winlog.event_data.Company", + to: "process.pe.company", + }, + { + from: "winlog.event_data.Description", + to: "process.pe.description", + }, + { + from: "winlog.event_data.FileVersion", + to: "process.pe.file_version", + }, + { + from: "winlog.event_data.Product", + to: "process.pe.product", + }, + ], + mode: "copy", + ignore_missing: true, + fail_on_error: false, + }) .Add(setRuleName) .Add(setProcessNameUsingExe) .Add(splitProcessArgs) @@ -951,6 +977,11 @@ var sysmon = (function () { from: "winlog.event_data.ImageLoaded", to: "file.path", }, + { + from: "winlog.event_data.OriginalFileName", + to: "file.pe.original_file_name", + }, + ], mode: "rename", ignore_missing: true, @@ -965,7 +996,24 @@ var sysmon = (function () { from: "winlog.event_data.SignatureStatus", to: "file.code_signature.status", }, + { + from: "winlog.event_data.Company", + to: "file.pe.company", + }, + { + from: "winlog.event_data.Description", + to: "file.pe.description", + }, + { + from: "winlog.event_data.FileVersion", + to: "file.pe.file_version", + }, + { + from: "winlog.event_data.Product", + to: "file.pe.product", + }, ], + ignore_missing: true, fail_on_error: false, }) .Add(setRuleName) diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx new file mode 100644 index 0000000000000000000000000000000000000000..7414b7fd316f6b10138ac927666ce3cfc043f21a GIT binary patch literal 69632 zcmeHQYiwM{b)H==S4&DHrPxYrN4BXru}#IKm;2(b01U;63HcO_L`M|MB) zp5Nu3e*OpFef9N^;Y%25c%OP4m;6BfzO|=Sjj2afNzJQe+&_gQ{yy*Jx4T-^V`@&# z;^h)vv_`V(41z7Iv+5poH~yB?DLg45Oaa$@IM3k8qFTV+f|^FiKHM+h`Q7R~-p%8j zQc3(}aFtYf{4cF$zf^G-jjXIZkGhh2TX-n0Ce#I_ysUFX10P;lc@OB48vXSX33Wo} z{wVS)BGe+@=5UNlV|?cqV(LKcN3`%Mq@1mW%RV;=&z6CeZ#Eai=RmTvJbv3I4DF%wF1GqWhwbQXDV(KAX z7w^|WqRcR&x^Y7m;P3J+arK9&KvoNjh}lJ$Qmc9^ZiI8*_pb@{PQ1Q=dRss_7E$gD z+N$=)Q0ymg^BFJ2!=K!ZOlNeNjv#0W1h9&V2&0RKT*NkY?-T76H7Jo#JaM(JCDM-8 zQpS?;mJVNO|1lCI$)v=Te6+J&wUd@75lH~y>#yxrBRUxAnAMa>u}&h~__)X6?Z0kU z!$@%%gtN6Q<4aAtox4<5oS?vJ&;HXs#jH(A{H7~J>R@wOS(!r~0wT{o(}oXtOcQ=i zXJo3EXgrSERHUW-M{Qo4J%xZv`17bps6?F5%Bp>GFo6<23$SVbOa#_Xh`##ZdTe}6^TVwR#MCusK$yrJfF}7HHW4^;ta1uTO+$r90he6c`=P; z#S%V(hyuM|?Mt8%SsCXL#uS`Vw<1JbPfYCpP@eRzw8jVm4r)vm)Kx_+L--CotDj^g|u@q-An3xD44;(z_4LBqd5 zg5O3qf8S%?w15TpYh~8QL31TU^{R*QY#w#HfX2S0dc8(Ehx>W;NdD}@ho1Y%$Uhv5 zzdrM+|9z>}_;+h=V^NSROj{&A7|4F^c>aUZiST;zJ14*V%;GOU`+-9b=59dyV<^^C6t?T0pF(4E(<=Y!^#EF8fkpx=wIZ#kqMZ#zY zs)QB&HZT60%At3mDwk1W!mQB_P{gpkURYiACgt6MrYvw8?TD%_#$kn!zBiZCU)%hP zD%#Pa5)6OP3(wm3sBcA+Vs~>I^hZ0;jIKyaIovVSWC8U{{*89Dt`lb*HN=vsgyd*P ze4RL>2+xj?+;Dcaj-wsBOdM;Onr^CydLRTvI}+=JC7zK@$&VKhw#|ec@XCf|>Xpr2 zv^7$38pk}s&no#I>GPr;U?`T@ZC>pC<=Ans6}xeE=u60F_c|dbbS^{~mgpW6(ptL` zk`0lF!CLDu;jL?!6hwzUTn|84dE}YY 
zSAP7`Uq14x#;Ulw?|KFJyhlw2uw$~KgG(cgNtDnm{?4ElDZ2Eb1lT838=C;i5@6R? zLTr;7Y3|cUKj<3(rD@#v;|uQeC^~}DW?L@m=Q)IBoPHfEkE^8W0Vk0si1G%-G4IbEV%GTG;c))k$xhqSQt8)X%^8y~`L0Nu2xS zo#|)Kzt}T!0KCh7bg|`6v~aX2^bI){VpYY=a<_!eSSB~`;fg0s5KFf~B~a%FY+TG{?i;LFeJ5}VLH8L@W~;*vqW zNsT(@z3J)rUNE^E-!YH)L}(%+?>Xq0(-H?I#BPje3|;r*Is4>gbCT8W^hk)BmNAjFNP9oK;AM=F%o-?a4E7LvZ(KRI8$#1w1qY&yd zcpmn+Rk+wNZLh`;rO?-++pZijYz9BRR0VzrMV_4Q$B$(neh}?gLIu<*`A!9RTLBM( zNKh`xU~E$-jlvK1Ma;qEBAi{OhIQr;WC``<5ajAdkZ!?`Zva1zgo+>kukj?t!<&4UPh1yH6Id7M3(UP4t_aeZEVBaL>y`gDbt)opK+!5l2aKK z)|Wu~(dU=LIs~EO+l=vcx(u?hq-d8856vuNk1^r(n!J&!Taly2c1UFCH9&m%+; z&-pfsCxbY6l|+b=9%-3!5}bA8Br3kIt#K0YL*$%1aAY)8{P=UrI4Ou9LXibOrYqze zj!iiRt_?Yc^_Z6N8jBgtmunnmINgfqT`ILFwdz)Y6YanW;sxc76F85BiW^_60yl&r z3vPVaha1Fuj$`OjG7o9Hg#YwYF{7aBw6yj#8jB~{rjL^xo;W<&8a&au*Qmx3;>l6q z$#|%E^4C@1iBM$0lgE8{BKB~z) z+8}<2KIHyTaiki3h)`s~5x+jf*^O(l8#`IV$s$`MizKwYSn%U$sQ6*C8wd3vLXibO zoZWayTcqaUC9ttQLA0L666p}cA;=aXi0D&kmw6ym{IJ=LgZLp7S@6TzjTfN@Hhy{z zLFlD@_4}@EjPv&K4Q;j(>o^IXCB!F80PzsYf(LFqQZE*;on&#)BaWW0Y(F_Xp)W!PZEirfsVTI$3@o&4 zXY`fMgX8Fnz*fmNHt4l3kyM<0_S&=00$)U);CZjdt@j>W{^ge&?%!b>k4VnM4*rB9 z3x2rqNd3};Ll8F}*_!dlDt@rP-xn%=*v2D4{1A#P_~FJQn;DNd1Zf6>)H@zYLIx~q z*)Ie6uLPO!6ePeTeEKLcQj#p-iW1`~EirO7A4AYm#b+rTlqI+8cmy+eh)>kJK8y1k zJe>OHkDq_?ZyGM=w8c2zFQz`{^S76rjg(WS$bwTp^VxstH^O;aoaIzR+_fI`8++YX zrXe+2oyYy!{21S)v61h6^L(!{naZ4Z z%sI;Jg{j5idJoho@`^n;XC-sC2}AH1`)bbK&!Q)%c9D9a^1KtFYO@+1)&p0S;VkfR zbrPP9_hYu~Un0Pt%Arx1N=nlL$C3UTN!fXB|AAbjYe0`{Vd|efMEEHMz z_*EYtm;Kpm z4WaV!!7BP?p~%9=zw?ckgDYg!)+YyJH?}|CXnifm$&Qoj;bd>kKk1h#Z{HXyKlfI_ z&q9%fpWA(M_6YQuoV9IM$2aRe*1Bq*J&i{5w$qGpZFJi_OsCZ@uMb+QW(>L@89!4$ zc9X}~uU&pR1pCx}3tvmlVxJOoj1Ff4Mq; zdA(E9;x@O|%A)EPy>=3I<~N7RzfWoYtz3sP$iG67g@22_etE55ib6?o&6TTj9ZIMB zYBo35>q*De!MUnFSHR|K;nAP}e9WBhm7bs2K5WU{b9)5iIw=ai)A2z+7oNn_Kw}r~j)r_ZwA`3tJ$J2Hnwc5Da zo<^g2+v&-gZCN)pY+o1CyxV?OZ}7Ir-PD)8HB`Q?M$au2S@_zo=PvuJ)yB)^c#Y;? 
zrw?=dyY+r<;9t>m(|_e{q4KZI&n>7A6N)VSyM6uK(y-VPFAQe5ku%p#ksX)A@;a~I zSET@Ii?GbokBvTUw9)5bwKpr!aHVRlKvTp!?huAA6gRg2|f8X?;P;tYy zZ+Z|ngdz)WxP8-?(5G&8%%JhAPY9>f!&$bu(s-*ks1 zZahwTdOs%aQb_ArKf)WYqc+&OX7o;Z+egkxskH1QW{)rWP?;9a6~+3|*7?5( zUZAhlAl}lROdjFdC`Fv5)r+56c{?Z(Maal>Xe4&sJTWWfz*H{J@nvBMLGCtIUw@jL@h zK>px5k4JIt4;4>rcH|PKAmiHoI{UM}#5^ zj`;N<&Tj0+HnZ@smmSfaAN^UBgPeYRSQ}PHa5<;q?MRfwIr_l5u%{iG|1w53i60qJ8C=Hjh9uy zZ#T|*IQ3|H|4*A^H!*K38t=J_m6?59X_Iz#<7>}uTv8WgoJ2cNE>!%m*^PtvArx8g!`Y1; zW;or-9Ap`;@(|wsKkQ@Ds_3^H4}^*vHoI{UH-sV!ZaBN~R@jZ54AP8o(oXb4feeE6 zXHNO;#`#e3#AY`R;)zgX!4qdU-mKl&VTr?%tNW9=K4$yPH&#*Y*CPJ4_j)+>)X%^8y~|CRZ!9^B-B>8H;FN!y zs9^!iSca&LB8@3a5wkjGGIfr(jp-}O|W;YJvhfrj}4`(-anBjCQ;puB5F^h_m#t!xX;b?2pCZ@eMD6pGK)D9YQIXT zeOgqA;fhBB+f6a-xNb|{O68Jxz+NjZk-QZp6HVn zk=L9W#ee9wXlKLj4h#6t{V@2WT^ literal 0 HcmV?d00001 diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx.golden.json b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx.golden.json new file mode 100644 index 00000000000..b1dda71c553 --- /dev/null +++ b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-loadimage.evtx.golden.json @@ -0,0 +1,94 @@ +[ + { + "@timestamp": "2020-10-28T02:39:26.374Z", + "event": { + "category": [ + "process" + ], + "code": 7, + "kind": "event", + "module": "sysmon", + "provider": "Microsoft-Windows-Sysmon", + "type": [ + "change" + ] + }, + "file": { + "code_signature": { + "signed": true, + "status": "Valid", + "subject_name": "Microsoft Windows", + "valid": true + }, + "directory": "C:\\Windows\\System32", + "extension": "dll", + "hash": { + "md5": "c7c45610f644906e6f7d664ef2e45b08", + "sha1": "9955a1c071c44a7ceecc0d928a9cfb7f64cc3f93", + "sha256": "4808f1101f4e42387d8ddb7a355668bae3bf6f781c42d3bcd82e23446b1deb3e" + }, + "name": "IDStore.dll", + "path": "C:\\Windows\\System32\\IDStore.dll", + "pe": { + "company": "Microsoft Corporation", + "description": "Identity Store", + "file_version": "10.0.17763.1 (WinBuild.160101.0800)", + "imphash": "194f3797b52231028c718b6d776c6853", + "original_file_name": "IdStore.dll", + "product": "Microsoft® Windows® Operating System" + } + }, + "host": { + "name": "vagrant" + }, + "log": { + "level": "information" + }, + "process": { + "entity_id": "{9f32b55f-d9de-5f98-f006-000000000600}", + "executable": "C:\\Windows\\System32\\dllhost.exe", + "name": "dllhost.exe", + "pid": 5184 + }, + "related": { + "hash": [ + "9955a1c071c44a7ceecc0d928a9cfb7f64cc3f93", + "c7c45610f644906e6f7d664ef2e45b08", + "4808f1101f4e42387d8ddb7a355668bae3bf6f781c42d3bcd82e23446b1deb3e", + "194f3797b52231028c718b6d776c6853" + ] + }, + "winlog": { + "api": "wineventlog", + "channel": "Microsoft-Windows-Sysmon/Operational", + "computer_name": "vagrant", + "event_data": { + "Company": "Microsoft Corporation", + "Description": "Identity Store", + "FileVersion": "10.0.17763.1 (WinBuild.160101.0800)", + "Product": "Microsoft® Windows® Operating System", + "RuleName": "-", + "Signature": "Microsoft Windows", + "SignatureStatus": "Valid", + "Signed": "true" + }, + "event_id": 7, + "process": { + "pid": 1676, + "thread": { + "id": 4796 + } + }, + "provider_guid": "{5770385f-c22a-43e0-bf4c-06f5698ffbd9}", + "provider_name": "Microsoft-Windows-Sysmon", + "record_id": 10685, + "user": { + "domain": "NT AUTHORITY", + "identifier": "S-1-5-18", + "name": "SYSTEM", + "type": "Well Known Group" + }, + "version": 3 + } + } +] \ No newline at end of file diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx 
b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx new file mode 100644 index 0000000000000000000000000000000000000000..d1fd4fd70efe01e33d734af408d53c3dc92d9eb2 GIT binary patch literal 69632 zcmeHQ3vgW3c|LcgUF+f1!$!mSY_t4=f2o<)euAEyFEM0 zJD+5l*j6D;ZvSJ4f5^Vidk^5=!L0mTi|aRSA;~3KX^h%&@!hZ5=i3ps3voV?6%WJA zb$elnis5oCw84amskq_SD*6oq6EV=dE(f5=*T=`uXtFI#!-rr6+Fszov(u z`O(v_pFM>yVW?E4xDBWLKyFVgjfzfjR1AtSF@@{TV2^#=e)z5Os5mZ0#Rwjz@gQn7 zibDuCB__lMu^zv)=*OKwgxQbt8XSjlXHtygYFr#d$QoRaAfKZCl`^)}uc6FuTq#5^TaL<3JvPv3-m=^FjTi6XH}rhXJ@4ItDc zo;G3c(v5N39~6om$m}Q{3Q<-jtk{iqd$_8qP;3@QhqK+@cg>K>l&qhTz0KE`gzM_Edw9t&N<<1VP9fuL zEmQbX6>r%RQ6A%zFV`HCEilu_9Seqi71uoa!r)sI>&ev0n@!Eygh=Si)_1 zQ4hTj))b)0(MC7z;Tpo;B!CzE=R*x-!H^V zmk4o@e23$f3KM10+mUDY5LGSq;NBSOb{vg8EvoHCI*RKtv9D!f&$_2hw?Ek(J3D;u z+b`t%{(9NlSQP9Psx5MSP($_uV~MvkLEw4e&#%5Fde4)OKV5U}Uq6a!Vkp$ygE&QD z$gSfJHY+`3cgSe>;|WK@Rvbxt*}Co?FT@DNMEUf$u*WzGMy(>W=p)F%;LJ!EAxU9H zueINQUncYtROJ*(On25u964gxYCEi~dKGhNR#Vo)X(S#I<$UKV`yEt}&W*SE4~R&- zKol|jYCAk@-_Cs$O^W1Z2>Fl1(ToD~(uLCISGtGSNIaV3oeqR2QDe)Vn5nNwJeH$; z6v-JtnQ}OZ#Fyj<%dv@Fj(zJ^ge_8Go9*vp`PkoCfp)`O3}GKb_z|JMN7JuJyg0{~ zsFYYzY}ws17M2(~6iduE#dg`JIdZb+cimZJy$OnEF*<YTduw@pm%28b@rl*LgqK9O;zFjLUA#& z8^OR_V-M0Cz^}pib=bjZ$w7M^ju&I^z`h3irPw*3bMR+)v>fr<_B~!RF7DNmZ6xbh zEt2xTPBe>zyt@I{wO~k-U@B`wy}Y;Y8+CuO;}iGqQ_NjdB+3NH-3GBYYpmbvVo3Pz zUbKO6*$UW82N2_+q~T1pM(#azoJ^@4=#0x#2noOOyD zv9AdzpEUZtX1Q~f&l==Mm9QrLX_Td#m)MK3I0YtqH~x1Z582OIYcG%Azwed5R|rHO zEwrrd`1%2q?Gg0v9<-$tO7?2ehBEFzt<|F*lBkIWjFNTunnwJ$;Jg+g8u4E*k1hDG z#gj%nYr?Su|4sJ2HpEabn1i1o2NOsUv{Fn}1hpHsiV4&-%a-+`+lhZGBOmAvtx6q< z5YG{5T#a)-gdyo`NMr$~N905l?4yB5g8Whdf>?n^IgkS}$SjCq6N2PI3i2~&M+lcA zn$XB#6oGRgf^J4O3lhi|0rVlxAkM!ZdTBAa`^4yeZF-mk%FF0OqeD#6XIlgEg;WfeL3c5)i-7*_km z5Xj4?Jnu_RG&={g^7kCW-1(6FRdzf%d46&|?l@NYeTy#+2>l(Jmkz)Nq2rkaFNCg_ z01OfO{Qz9C%~O`-dp#{H53CWoWP*4_8-Ik(S!hhcFLSpWi88a>o)1n5-D)#fB*UxQ z*d=sLW#Vw*neCq3vmbbBDld$)*%Rk_zc@qU9wJ6}`AU+VUA_TWDD?LQwttfQPhun{#eP`YE+6ulHU5M$wJ9BS$W=vG>#!3W5~lHq&tO_Z^c(0!~a~6sLB)j zE2TyIrL>7d)cWTmqtx-lFr??{NMIsegC)FY7V0xb|7ZTGrLg= z?8{7t&lGwVI0qcXZei!fBd;|(pW3zrs_MAG{X;lkj$I1IMfUvVc*kn`0#I)UmlBCGKlIwDjBeoVwjfOO+mup{2kz zzM3+j<$VF#GsFXKY1(^G;_gpNDOZ|}mbQSR)&k*Dro_2e&bvMXTH0gN(%%PJ^T()8 z>3~AG1E`@955kp1QzUTkL!i2O8>)!{G6o{(6C_rf%M225TCsnMU(AR1-5m+P=hScR_tu zP3`2Sn!Wu;PB^h9j1=W&sHUNs3r01m8{P=HZKK4R-vQR#WmC=9LDm)$YwG5mUWI0<_h}6pNt?VsxT2`r$?=+RN=3 zo#-$Y-u_z6{xX2Y(uvmWyeNiLKuZZ;!`&ZYH&Oy4C4kL}zmFxO$92K#alKWXZ8=@} z+UHzS0@!BB%}5E1Y&}r64k<-w3~MZ>wf`lG-i+mRmKI}xy?@5)IfGb@<+RfmWz;g3 z)66 z!>i>wDs5(GvyG0lVBQu^Z zTl;#EyMEZSOgMmMil!Gu{s0$lHNTdSLfE zi68Y+)M^%sk9Pg-TK(k(UcA`>DnByg-Av03h*{klB*XB}wLzQ!YGer#4YtF_MG6+Y zM2;!kuLfGAr|5Q^rSPLaE86#V;aL+fBz-LtdGgmOZV!!!=>a-{`1)|%f^y#tA7j1Z zCiv#+!G90VJ5d*GDfE5yijw-2fuW;3tz+gk=%?Gm%-P8_oJac+vt2aN+A>gRjlX zk#0K+gRe7dY#6OC$X4nxM!0o$48G1T5reO@^k$T&n)fcgz7CpQ^p@h(`r^H?T3-_g z8_vr==j>XfU6RJv^EYSbkFOhm_#2>Gl#dmZwr$e3PF(RYffWn6zIy^XU<0uG8HV-K zuw0{ZQfh?_%T*KklByV!;I_|F#k)7$Rf*JjTm{}9Wbhn+=UNG~E|m_eUHOXJ#&Y19kX zROCO;)%ufci)@{0OW@!)YpvN|QSDeA3ZB%&S+hEnLDdFT8&vIss#is_S6cjsznyfU z>LPpCF}hwx*DFt5FWTPc!#9%i(0t?dHkICmB~HUqX}9fnZfdEUwTMIJtbK5uf!_B| zj1MkDEe*9a)Y6Aq{>I~jtM@-{aZyXK+j29O$A(%4QcI)bWpuoZj@JPEyqAk2`pw^r zG1D!TH!|q+NCus+53Z%a4d?EIOUa-qEuKFt;V*lpUnS#%%PSFf7#{#Xglck0Ob2eFbj~;={mptaA~$x>~WysW}JTjyYWbE{BU9Z(0HWwkmL>w z7iAZ$N9xTUjjV0Y-E!QyZWSh;7!x%_Ztd!)`r!(sh!HN)3aBlKr#pc&HytsbJ``CSV)2o>k6jdh{=;cAu| zSxwMRYJe6~3+76x!=$`#DX+~WS(*VBJwVucmbo71@5gR%wZYW}SC<(7))+jT{;k_RI$1+6f8>9^ z_~`T2t1wo02?{Y$HR=eXlVxyqAg=cJzqL1nPF4drM`~EtU{8X>Am?}nn$ufsovgor 
z@|4oa>VuZ@AhhPw@Z@;E+|PasE*wt2UZjMsv$(Zuw|#-RY54-<){#EhkMav}vl5RU4WbNK=jGmeJfY znp;M5D|=0h^Ho+&D`Cp2@4&9DhgabIX6#|DhZ%M@D-eY^nk3?&%_kAZFM&9A+K6Kl zSo^S6AjiZ3jNhZc7e|0Hh)-y#ORHUtFXl&X-2;t?F>x5$O;f-#2_PS)G>E%L@lQ)( zYDSIW-XT0A;u%MdXw}L5aK(lLIQAn3-qY5{d!TMXrfahde4< zWLh5A;|^DwXqI@H*2*oo=BkWcI7?!u)iQ0B+wjk|DA>Na(#83*&V**3BPZ}hTF(7Z zoVOcmDuc-lCO4Se2a_N6SW|s&nH!T!>W@L$U~+@WL6O-Ij7Kly(Q6WXbtGOYEZPnk zk6yqCMt{uf5&V3`dZCzpzkXq&if5=Y`^qGo4G!J(=V z@|gV9NKBqa%-M0(AP-yr;n@-KljzQXMdd!dkN5~x!7yXwS|{;pc378&)&)H;aN z5hEULkoT!;M!#8Yc#pqYpjEE<*oiaZRE8v$vC9 zFMPx`AB=HNZtr3ptB)0YtM|YD=6#>4D1GsJX9oW4x*AIjc-Hjv8k~0AQH)`o!e|n^ z4iL>L>_SAqHMipEwzyx7J3baFeR!#VrUymhLUG-Vp7&Szy&naIW}>7Lk+`Ma4W0H4 zlG>A6lN*S{!R3(S@ARZy?U!~OX;b!2N$!4;9ajP3Qq@mBsth}Msv_+4Ga0;&h;ycb z+V$%DCgm3nfaYorhuTc^O{%s84u1Neff>zo`c`4IaP*l;1kfY2u>H0mW8G#%5U^Rl+h4oaUtC>?e+1>U1ym~y`laYzp&-s7t$GAI@qSo3)g*0%6b74{zOMG- zF}GNpnB6HISR*+1JB(dz2^{=rrf%G$c56z@l2kkftpqv$u1atSrN}MvCdzKzIw4-i zyJPGN*uL_6Z)~{_I`y*oS)X5>I&%ZoC0d3j7J3)=h=|#{0Ao;vy;AnM?A1VJboiqn zrb3isx5SUdYTm9bTBfs`IH^VM_a5)PZ|j$zxMpL)mEVr{pfsFsEVYX7jr{s+`}lD}E(E?38NUd- z{#br6&#C!=uE;IQ0&-iCDLE3?S^FTK?LjZ!Ew+ix_}?k|V8bcID>g*G@9QPMdKN`~ z`lKrQLNGbp{N)3xojBzSH$ZAJE{fbM|w>9lFhJ(ZLz@; zEgkDxu~uq`P>V#Rll`PYKGE6H;VHx8#xmaYIi|{97wZR{uoCu#^eL`+rgn2(m8f}>0EZK`(^q&r3H?%&^d&hPRyE8 zH*!vEoerCl^4-}|Yr`AVV>=Qe{@wTzO2#SIADlJj4um;?c!y>UnW28O<`MM#L7DpS_scJvu1T{|;Hg5L)k{P}R{r?ub z-`}#|+IlX0&AHP#ouXgAPhdpSIWu%F1dlYAG3k7!b7=0CaMHO+yS;yRLEF0@ozp1> zwRfE}L+1>gGjwhq=$zzhHJ(e`{r(^CUcSh5PN(SC?}_Jh&J3M1bPky9-0&RvIv1Ur r(s=H(3)$ZN=-g#?ib3sN=giPKL+1>gn+H1ged4*5LTq{yc<%oJX+982 literal 0 HcmV?d00001 diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx.golden.json b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx.golden.json new file mode 100644 index 00000000000..7de72129b33 --- /dev/null +++ b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-12-processcreate.evtx.golden.json @@ -0,0 +1,96 @@ +[ + { + "@timestamp": "2020-10-27T20:00:14.32Z", + "event": { + "category": [ + "process" + ], + "code": 1, + "kind": "event", + "module": "sysmon", + "provider": "Microsoft-Windows-Sysmon", + "type": [ + "start", + "process_start" + ] + }, + "host": { + "name": "vagrant" + }, + "log": { + "level": "information" + }, + "process": { + "args": [ + "C:\\Windows\\system32\\notepad.exe" + ], + "command_line": "\"C:\\Windows\\system32\\notepad.exe\" ", + "entity_id": "{9f32b55f-7c4e-5f98-5803-000000000500}", + "executable": "C:\\Windows\\System32\\notepad.exe", + "hash": { + "sha1": "b6d237154f2e528f0b503b58b025862d66b02b73" + }, + "name": "notepad.exe", + "parent": { + "args": [ + "C:\\Windows\\Explorer.EXE" + ], + "command_line": "C:\\Windows\\Explorer.EXE", + "entity_id": "{9f32b55f-6fdf-5f98-7000-000000000500}", + "executable": "C:\\Windows\\explorer.exe", + "name": "explorer.exe", + "pid": 4212 + }, + "pe": { + "company": "Microsoft Corporation", + "description": "Notepad", + "file_version": "10.0.17763.475 (WinBuild.160101.0800)", + "original_file_name": "NOTEPAD.EXE", + "product": "Microsoft® Windows® Operating System" + }, + "pid": 3616, + "working_directory": "C:\\Users\\vagrant\\" + }, + "related": { + "hash": "b6d237154f2e528f0b503b58b025862d66b02b73", + "user": "vagrant" + }, + "user": { + "domain": "VAGRANT", + "name": "vagrant" + }, + "winlog": { + "api": "wineventlog", + "channel": "Microsoft-Windows-Sysmon/Operational", + "computer_name": "vagrant", + "event_data": { + "Company": "Microsoft Corporation", + "Description": "Notepad", + "FileVersion": 
"10.0.17763.475 (WinBuild.160101.0800)", + "IntegrityLevel": "Medium", + "LogonGuid": "{9f32b55f-6fdd-5f98-e7c9-020000000000}", + "LogonId": "0x2c9e7", + "Product": "Microsoft® Windows® Operating System", + "RuleName": "-", + "TerminalSessionId": "1" + }, + "event_id": 1, + "process": { + "pid": 7144, + "thread": { + "id": 6876 + } + }, + "provider_guid": "{5770385f-c22a-43e0-bf4c-06f5698ffbd9}", + "provider_name": "Microsoft-Windows-Sysmon", + "record_id": 20, + "user": { + "domain": "NT AUTHORITY", + "identifier": "S-1-5-18", + "name": "SYSTEM", + "type": "Well Known Group" + }, + "version": 5 + } + } +] \ No newline at end of file diff --git a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-9.01.evtx.golden.json b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-9.01.evtx.golden.json index 0ba347499a8..9f1d14c88ab 100644 --- a/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-9.01.evtx.golden.json +++ b/x-pack/winlogbeat/module/sysmon/test/testdata/sysmon-9.01.evtx.golden.json @@ -126,6 +126,12 @@ "name": "services.exe", "pid": 488 }, + "pe": { + "company": "Sysinternals - www.sysinternals.com", + "description": "System activity monitor", + "file_version": "9.01", + "product": "Sysinternals Sysmon" + }, "pid": 4860, "working_directory": "C:\\Windows\\system32\\" }, @@ -215,6 +221,12 @@ "name": "svchost.exe", "pid": 560 }, + "pe": { + "company": "Microsoft Corporation", + "description": "Sink to receive asynchronous callbacks for WMI client application", + "file_version": "6.3.9600.16384 (winblue_rtm.130821-1623)", + "product": "Microsoft® Windows® Operating System" + }, "pid": 5028, "working_directory": "C:\\Windows\\system32\\" }, @@ -404,6 +416,12 @@ "name": "svchost.exe", "pid": 560 }, + "pe": { + "company": "Microsoft Corporation", + "description": "WMI Provider Host", + "file_version": "6.3.9600.16384 (winblue_rtm.130821-1623)", + "product": "Microsoft® Windows® Operating System" + }, "pid": 4508, "working_directory": "C:\\Windows\\system32\\" }, From 2474f5b08096bf2734f5a2452a8c6e0031dd3384 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Wed, 28 Oct 2020 15:26:01 +0000 Subject: [PATCH 86/93] [CI] archive only if failed steps (#22220) --- Jenkinsfile | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index d12848b28cc..6fff22bfa09 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -330,6 +330,8 @@ def withBeatsEnv(Map args = [:], Closure body) { git config --global user.name "beatsmachine" fi''') } + // Skip to upload the generated files by default. + def upload = false try { // Add more stability when dependencies are not accessible temporarily // See https://github.com/elastic/beats/issues/21609 @@ -339,9 +341,12 @@ def withBeatsEnv(Map args = [:], Closure body) { cmd(label: 'Download modules to local cache - retry', script: 'go mod download', returnStatus: true) } body() + } catch(err) { + // Upload the generated files ONLY if the step failed. This will avoid any overhead with Google Storage + upload = true } finally { if (archive) { - archiveTestOutput(testResults: testResults, artifacts: artifacts, id: args.id) + archiveTestOutput(testResults: testResults, artifacts: artifacts, id: args.id, upload: upload) } // Tear down the setup for the permamnent workers. catchError(buildResult: 'SUCCESS', stageResult: 'SUCCESS') { @@ -419,16 +424,20 @@ def archiveTestOutput(Map args = [:]) { script: 'rm -rf ve || true; find . 
-type d -name vendor -exec rm -r {} \\;') } else { log(level: 'INFO', text: 'Delete folders that are causing exceptions (See JENKINS-58421) is disabled for Windows.') } junitAndStore(allowEmptyResults: true, keepLongStdio: true, testResults: args.testResults, stashedTestReports: stashedTestReports, id: args.id) - tarAndUploadArtifacts(file: "test-build-artifacts-${args.id}.tgz", location: '.') + if (args.upload) { + tarAndUploadArtifacts(file: "test-build-artifacts-${args.id}.tgz", location: '.') + } } - catchError(buildResult: 'SUCCESS', message: 'Failed to archive the build test results', stageResult: 'SUCCESS') { - def folder = cmd(label: 'Find system-tests', returnStdout: true, script: 'python .ci/scripts/search_system_tests.py').trim() - log(level: 'INFO', text: "system-tests='${folder}'. If no empty then let's create a tarball") - if (folder.trim()) { - // TODO: nodeOS() should support ARM - def os_suffix = isArm() ? 'linux' : nodeOS() - def name = folder.replaceAll('/', '-').replaceAll('\\\\', '-').replaceAll('build', '').replaceAll('^-', '') + '-' + os_suffix - tarAndUploadArtifacts(file: "${name}.tgz", location: folder) + if (args.upload) { + catchError(buildResult: 'SUCCESS', message: 'Failed to archive the build test results', stageResult: 'SUCCESS') { + def folder = cmd(label: 'Find system-tests', returnStdout: true, script: 'python .ci/scripts/search_system_tests.py').trim() + log(level: 'INFO', text: "system-tests='${folder}'. If no empty then let's create a tarball") + if (folder.trim()) { + // TODO: nodeOS() should support ARM + def os_suffix = isArm() ? 'linux' : nodeOS() + def name = folder.replaceAll('/', '-').replaceAll('\\\\', '-').replaceAll('build', '').replaceAll('^-', '') + '-' + os_suffix + tarAndUploadArtifacts(file: "${name}.tgz", location: folder) + } } } } From cffc81db9ae1f6ceab7c61ba5407e517165d1414 Mon Sep 17 00:00:00 2001 From: Lee Hinman <57081003+leehinman@users.noreply.github.com> Date: Wed, 28 Oct 2020 12:19:14 -0500 Subject: [PATCH 87/93] protect against accessing undefined variables in sysmon module (#22236) Closes #22219 --- CHANGELOG.next.asciidoc | 1 + .../module/sysmon/config/winlogbeat-sysmon.js | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 874fa29264a..1a3e1623533 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -397,6 +397,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix invalid IP addresses in DNS query results from Sysmon data. {issue}18432[18432] {pull}18436[18436] - Fields from Winlogbeat modules were not being included in index templates and patterns. {pull}18983[18983] - Add source.ip validation for event ID 4778 in the Security module. {issue}19627[19627] +- Protect against accessing undefined variables in Sysmon module. 
{issue}22219[22219] {pull}22236[22236] *Functionbeat* diff --git a/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js b/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js index d5921722210..9523b9171f6 100644 --- a/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js +++ b/x-pack/winlogbeat/module/sysmon/config/winlogbeat-sysmon.js @@ -303,6 +303,9 @@ var sysmon = (function () { return; } var exe = evt.Get(pathField); + if (!exe) { + return; + } evt.Put(nameField, path.basename(exe)); }; @@ -327,7 +330,11 @@ var sysmon = (function () { }; var addUser = function (evt) { - var userParts = evt.Get("winlog.event_data.User").split("\\"); + var userParts = evt.Get("winlog.event_data.User"); + if (!userParts) { + return; + } + userParts = userParts.split("\\"); if (userParts.length === 2) { evt.Delete("user"); evt.Put("user.domain", userParts[0]); @@ -406,6 +413,9 @@ var sysmon = (function () { // in the specified namespace. It also adds all the hashes to 'related.hash'. var addHashes = function (evt, namespace, hashField) { var hashes = evt.Get(hashField); + if (!hashes) { + return; + } evt.Delete(hashField); hashes.split(",").forEach(function (hash) { var parts = hash.split("="); From 713a503932d3fb7ce1eb2f508e13f7427dea36d6 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 29 Oct 2020 09:22:55 +0000 Subject: [PATCH 88/93] [CI] support windows-2008-r2 (#19791) --- auditbeat/Jenkinsfile.yml | 1 + filebeat/Jenkinsfile.yml | 1 + heartbeat/Jenkinsfile.yml | 11 +++++++++++ metricbeat/Jenkinsfile.yml | 1 + packetbeat/Jenkinsfile.yml | 11 +++++++++++ winlogbeat/Jenkinsfile.yml | 1 + x-pack/auditbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/elastic-agent/Jenkinsfile.yml | 11 +++++++++++ x-pack/filebeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/functionbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/metricbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/packetbeat/Jenkinsfile.yml | 11 +++++++++++ x-pack/winlogbeat/Jenkinsfile.yml | 11 +++++++++++ 13 files changed, 103 insertions(+) diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index be9663b8e8d..925b9ff0adc 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -52,6 +52,7 @@ stages: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. - "windows-2019" + #- "windows-2008-r2" https://github.com/elastic/beats/issues/19799 windows-2016: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index f13f0329345..322cea302c3 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -51,6 +51,7 @@ stages: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. - "windows-2019" + #- "windows-2008-r2" https://github.com/elastic/beats/issues/19795 windows-2016: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml index 6135df2d26f..9cebe5bbde2 100644 --- a/heartbeat/Jenkinsfile.yml +++ b/heartbeat/Jenkinsfile.yml @@ -83,3 +83,14 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags + windows-2008: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-2008-r2" + when: ## Override the top-level when. 
+ comments: + - "/test heartbeat for windows-2008" + labels: + - "windows-2008" + branches: true ## for all the branches + tags: true ## for all the tag diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml index 993d0681469..bdb9fd1a2bf 100644 --- a/metricbeat/Jenkinsfile.yml +++ b/metricbeat/Jenkinsfile.yml @@ -45,6 +45,7 @@ stages: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. - "windows-2019" + #- "windows-2008-r2" https://github.com/elastic/beats/issues/19800 windows-2016: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml index e956b7ddc6b..96aff7bbfb2 100644 --- a/packetbeat/Jenkinsfile.yml +++ b/packetbeat/Jenkinsfile.yml @@ -83,3 +83,14 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags + windows-2008: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-2008-r2" + when: ## Override the top-level when. + comments: + - "/test packetbeat for windows-2008" + labels: + - "windows-2008" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/winlogbeat/Jenkinsfile.yml b/winlogbeat/Jenkinsfile.yml index a7620c47138..3514c68c81e 100644 --- a/winlogbeat/Jenkinsfile.yml +++ b/winlogbeat/Jenkinsfile.yml @@ -24,6 +24,7 @@ stages: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. - "windows-2019" + #- "windows-2008-r2" https://github.com/elastic/beats/issues/19798 windows-2016: mage: "mage build unitTest" platforms: ## override default labels in this specific stage. diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml index a65ccfeeecd..8b5f5d298dc 100644 --- a/x-pack/auditbeat/Jenkinsfile.yml +++ b/x-pack/auditbeat/Jenkinsfile.yml @@ -84,3 +84,14 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags + windows-2008: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-2008-r2" + when: ## Override the top-level when. + comments: + - "/test auditbeat for windows-2008" + labels: + - "windows-2008" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index d1e415a6e78..e3f33e109a9 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -83,3 +83,14 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags + windows-2008: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-2008-r2" + when: ## Override the top-level when. + comments: + - "/test x-pack/elastic-agent for windows-2008" + labels: + - "windows-2008" + branches: true ## for all the branches + tags: true ## for all the tags diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index 460115bbe41..e677f15d225 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -84,3 +84,14 @@ stages: - "windows-10" branches: true ## for all the branches tags: true ## for all the tags + windows-2008: + mage: "mage build unitTest" + platforms: ## override default labels in this specific stage. + - "windows-2008-r2" + when: ## Override the top-level when. 
+      comments:
+        - "/test x-pack/filebeat for windows-2008"
+      labels:
+        - "windows-2008"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml
index 5d8cf74c480..7339a700f41 100644
--- a/x-pack/functionbeat/Jenkinsfile.yml
+++ b/x-pack/functionbeat/Jenkinsfile.yml
@@ -81,3 +81,14 @@ stages:
       - "windows-10"
     branches: true ## for all the branches
     tags: true ## for all the tags
+  windows-2008:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-2008-r2"
+    when: ## Override the top-level when.
+      comments:
+        - "/test x-pack/functionbeat for windows-2008"
+      labels:
+        - "windows-2008"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml
index 2fe5da311fa..4d00157b2e6 100644
--- a/x-pack/metricbeat/Jenkinsfile.yml
+++ b/x-pack/metricbeat/Jenkinsfile.yml
@@ -73,3 +73,14 @@ stages:
       - "windows-10"
     branches: true ## for all the branches
     tags: true ## for all the tags
+  windows-2008:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-2008-r2"
+    when: ## Override the top-level when.
+      comments:
+        - "/test x-pack/metricbeat for windows-2008"
+      labels:
+        - "windows-2008"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml
index baeb03a5e31..5f1ff2ce6b2 100644
--- a/x-pack/packetbeat/Jenkinsfile.yml
+++ b/x-pack/packetbeat/Jenkinsfile.yml
@@ -84,3 +84,14 @@ stages:
       - "windows-10"
     branches: true ## for all the branches
     tags: true ## for all the tags
+  windows-2008:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-2008-r2"
+    when: ## Override the top-level when.
+      comments:
+        - "/test x-pack/packetbeat for windows-2008"
+      labels:
+        - "windows-2008"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
diff --git a/x-pack/winlogbeat/Jenkinsfile.yml b/x-pack/winlogbeat/Jenkinsfile.yml
index f95e44395e1..966e00f4999 100644
--- a/x-pack/winlogbeat/Jenkinsfile.yml
+++ b/x-pack/winlogbeat/Jenkinsfile.yml
@@ -58,3 +58,14 @@ stages:
      - "windows-10"
     branches: true ## for all the branches
     tags: true ## for all the tags
+  windows-2008:
+    mage: "mage build unitTest"
+    platforms: ## override default labels in this specific stage.
+      - "windows-2008-r2"
+    when: ## Override the top-level when.
+      comments:
+        - "/test x-pack/winlogbeat for windows-2008"
+      labels:
+        - "windows-2008"
+      branches: true ## for all the branches
+      tags: true ## for all the tags
From 554d5646b00668eb60b9fb3b1404bb5729722d20 Mon Sep 17 00:00:00 2001
From: Mariana Dima
Date: Thu, 29 Oct 2020 13:18:44 +0000
Subject: [PATCH 89/93] Add support for different Azure Cloud environments in
 the metricbeat azure module (#21044)

* modify doc

* work

* changelog

* fix url

* work

* fmt update

* err
---
 CHANGELOG.next.asciidoc | 1 +
 metricbeat/docs/modules/azure.asciidoc | 21 ++++++-
 .../module/azure/_meta/docs.asciidoc | 21 ++++++-
 .../module/azure/billing/billing.go | 15 +----
 .../metricbeat/module/azure/billing/client.go | 8 ++-
 .../module/azure/billing/client_test.go | 4 +-
 .../module/azure/billing/mock_service.go | 4 +-
 .../module/azure/billing/service.go | 12 ++--
 x-pack/metricbeat/module/azure/client.go | 2 +-
 x-pack/metricbeat/module/azure/config.go | 63 +++++++++++++------
 .../module/azure/monitor_service.go | 14 +++--
 11 files changed, 116 insertions(+), 49 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 1a3e1623533..db784002f81 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -769,6 +769,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Add billing metricset into googlecloud module. {pull}20812[20812] {issue}20738[20738]
 - Move `compute_vm_scaleset` to light metricset. {pull}21038[21038] {issue}20985[20985]
 - Sanitize `event.host`. {pull}21022[21022]
+- Add support for different Azure Cloud environments in the metricbeat azure module. {pull}21044[21044] {issue}20988[20988]
 - Add overview and platform health dashboards to Cloud Foundry module. {pull}21124[21124]
 - Release lambda metricset in aws module as GA. {issue}21251[21251] {pull}21255[21255]
 - Add dashboard for pubsub metricset in googlecloud module. {pull}21326[21326] {issue}17137[17137]
diff --git a/metricbeat/docs/modules/azure.asciidoc b/metricbeat/docs/modules/azure.asciidoc
index 42d4d619c02..8ec43e4be43 100644
--- a/metricbeat/docs/modules/azure.asciidoc
+++ b/metricbeat/docs/modules/azure.asciidoc
@@ -70,7 +70,26 @@ Required credentials for the `azure` module:
 
 `tenant_id`:: The unique identifier of the Azure Active Directory instance
 
-Users can use the azure credentials keys if configured `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_TENANT_ID`, `AZURE_SUBSCRIPTION_ID`
+The azure credentials keys can be used if configured `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_TENANT_ID`, `AZURE_SUBSCRIPTION_ID`
+
+`resource_manager_endpoint` ::
+_string_
+Optional. By default the azure public environment is used; to override it, users can provide a specific resource manager endpoint in order to use a different azure environment.
+Ex:
+https://management.chinacloudapi.cn for azure ChinaCloud
+https://management.microsoftazure.de for azure GermanCloud
+https://management.azure.com for azure PublicCloud
+https://management.usgovcloudapi.net for azure USGovernmentCloud
+
+`active_directory_endpoint` ::
+_string_
+Optional. By default the active directory endpoint associated with the resource manager endpoint is used; to override it, users can provide a specific active directory endpoint in order to use a different azure environment.
+Ex:
+https://login.chinacloudapi.cn for azure ChinaCloud
+https://login.microsoftonline.de for azure GermanCloud
+https://login.microsoftonline.com for azure PublicCloud
+https://login.microsoftonline.us for azure USGovernmentCloud
+
 [float]
 == Metricsets
 
diff --git a/x-pack/metricbeat/module/azure/_meta/docs.asciidoc b/x-pack/metricbeat/module/azure/_meta/docs.asciidoc
index 01f6389ab93..9065f2f283f 100644
--- a/x-pack/metricbeat/module/azure/_meta/docs.asciidoc
+++ b/x-pack/metricbeat/module/azure/_meta/docs.asciidoc
@@ -62,7 +62,26 @@ Required credentials for the `azure` module:
 
 `tenant_id`:: The unique identifier of the Azure Active Directory instance
 
-Users can use the azure credentials keys if configured `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_TENANT_ID`, `AZURE_SUBSCRIPTION_ID`
+The azure credentials keys can be used if configured `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_TENANT_ID`, `AZURE_SUBSCRIPTION_ID`
+
+`resource_manager_endpoint` ::
+_string_
+Optional. By default the azure public environment is used; to override it, users can provide a specific resource manager endpoint in order to use a different azure environment.
+Ex:
+https://management.chinacloudapi.cn for azure ChinaCloud
+https://management.microsoftazure.de for azure GermanCloud
+https://management.azure.com for azure PublicCloud
+https://management.usgovcloudapi.net for azure USGovernmentCloud
+
+`active_directory_endpoint` ::
+_string_
+Optional. By default the active directory endpoint associated with the resource manager endpoint is used; to override it, users can provide a specific active directory endpoint in order to use a different azure environment.
+Ex:
+https://login.chinacloudapi.cn for azure ChinaCloud
+https://login.microsoftonline.de for azure GermanCloud
+https://login.microsoftonline.com for azure PublicCloud
+https://login.microsoftonline.us for azure USGovernmentCloud
+
 [float]
 == Metricsets
 
diff --git a/x-pack/metricbeat/module/azure/billing/billing.go b/x-pack/metricbeat/module/azure/billing/billing.go
index 2f6025ef1cf..f43c709a8c0 100644
--- a/x-pack/metricbeat/module/azure/billing/billing.go
+++ b/x-pack/metricbeat/module/azure/billing/billing.go
@@ -5,10 +5,10 @@
 package billing
 
 import (
-	"time"
-
 	"github.com/pkg/errors"
 
+	"github.com/elastic/beats/v7/x-pack/metricbeat/module/azure"
+
 	"github.com/elastic/beats/v7/libbeat/logp"
 	"github.com/elastic/beats/v7/metricbeat/mb"
 	"github.com/elastic/beats/v7/metricbeat/mb/parse"
@@ -32,19 +32,10 @@ type MetricSet struct {
 	log *logp.Logger
 }
 
-// Config options
-type Config struct {
-	ClientId string `config:"client_id" validate:"required"`
-	ClientSecret string `config:"client_secret" validate:"required"`
-	TenantId string `config:"tenant_id" validate:"required"`
-	SubscriptionId string `config:"subscription_id" validate:"required"`
-	Period time.Duration `config:"period" validate:"nonzero,required"`
-}
-
 // New creates a new instance of the MetricSet. New is responsible for unpacking
 // any MetricSet specific configuration options if there are any.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - var config Config + var config azure.Config err := base.Module().UnpackConfig(&config) if err != nil { return nil, errors.Wrap(err, "error unpack raw module config using UnpackConfig") diff --git a/x-pack/metricbeat/module/azure/billing/client.go b/x-pack/metricbeat/module/azure/billing/client.go index a6e4ba83cb7..0c3bc78d187 100644 --- a/x-pack/metricbeat/module/azure/billing/client.go +++ b/x-pack/metricbeat/module/azure/billing/client.go @@ -8,6 +8,8 @@ import ( "fmt" "time" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/azure" + "github.com/pkg/errors" "github.com/Azure/azure-sdk-for-go/services/consumption/mgmt/2019-01-01/consumption" @@ -18,7 +20,7 @@ import ( // Client represents the azure client which will make use of the azure sdk go metrics related clients type Client struct { BillingService Service - Config Config + Config azure.Config Log *logp.Logger } @@ -29,8 +31,8 @@ type Usage struct { } // NewClient instantiates the an Azure monitoring client -func NewClient(config Config) (*Client, error) { - usageService, err := NewService(config.ClientId, config.ClientSecret, config.TenantId, config.SubscriptionId) +func NewClient(config azure.Config) (*Client, error) { + usageService, err := NewService(config) if err != nil { return nil, err } diff --git a/x-pack/metricbeat/module/azure/billing/client_test.go b/x-pack/metricbeat/module/azure/billing/client_test.go index a0eb6c0d31c..fd93cedad9d 100644 --- a/x-pack/metricbeat/module/azure/billing/client_test.go +++ b/x-pack/metricbeat/module/azure/billing/client_test.go @@ -11,10 +11,12 @@ import ( "github.com/Azure/azure-sdk-for-go/services/consumption/mgmt/2019-01-01/consumption" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + + "github.com/elastic/beats/v7/x-pack/metricbeat/module/azure" ) var ( - config = Config{} + config = azure.Config{} ) func TestClient(t *testing.T) { diff --git a/x-pack/metricbeat/module/azure/billing/mock_service.go b/x-pack/metricbeat/module/azure/billing/mock_service.go index 4bbf4a16622..d0499701184 100644 --- a/x-pack/metricbeat/module/azure/billing/mock_service.go +++ b/x-pack/metricbeat/module/azure/billing/mock_service.go @@ -7,6 +7,8 @@ package billing import ( "github.com/stretchr/testify/mock" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/azure" + "github.com/elastic/beats/v7/libbeat/logp" "github.com/Azure/azure-sdk-for-go/services/consumption/mgmt/2019-01-01/consumption" @@ -27,7 +29,7 @@ type MockService struct { func NewMockClient() *Client { return &Client{ new(MockService), - Config{}, + azure.Config{}, logp.NewLogger("test azure monitor"), } } diff --git a/x-pack/metricbeat/module/azure/billing/service.go b/x-pack/metricbeat/module/azure/billing/service.go index ea7056e6c6f..17d8a9f9fd7 100644 --- a/x-pack/metricbeat/module/azure/billing/service.go +++ b/x-pack/metricbeat/module/azure/billing/service.go @@ -7,6 +7,8 @@ package billing import ( "context" + "github.com/elastic/beats/v7/x-pack/metricbeat/module/azure" + "github.com/Azure/azure-sdk-for-go/services/consumption/mgmt/2019-01-01/consumption" "github.com/Azure/go-autorest/autorest/azure/auth" @@ -22,14 +24,16 @@ type UsageService struct { } // NewService instantiates the Azure monitoring service -func NewService(clientId string, clientSecret string, tenantId string, subscriptionId string) (*UsageService, error) { - clientConfig := auth.NewClientCredentialsConfig(clientId, clientSecret, tenantId) +func NewService(config 
azure.Config) (*UsageService, error) { + clientConfig := auth.NewClientCredentialsConfig(config.ClientId, config.ClientSecret, config.TenantId) + clientConfig.AADEndpoint = config.ActiveDirectoryEndpoint + clientConfig.Resource = config.ResourceManagerEndpoint authorizer, err := clientConfig.Authorizer() if err != nil { return nil, err } - forcastsClient := consumption.NewForecastsClient(subscriptionId) - usageDetailsClient := consumption.NewUsageDetailsClient(subscriptionId) + forcastsClient := consumption.NewForecastsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) + usageDetailsClient := consumption.NewUsageDetailsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) forcastsClient.Authorizer = authorizer usageDetailsClient.Authorizer = authorizer service := &UsageService{ diff --git a/x-pack/metricbeat/module/azure/client.go b/x-pack/metricbeat/module/azure/client.go index dd48f962b59..668356bb724 100644 --- a/x-pack/metricbeat/module/azure/client.go +++ b/x-pack/metricbeat/module/azure/client.go @@ -32,7 +32,7 @@ type mapResourceMetrics func(client *Client, resources []resources.GenericResour // NewClient instantiates the an Azure monitoring client func NewClient(config Config) (*Client, error) { - azureMonitorService, err := NewService(config.ClientId, config.ClientSecret, config.TenantId, config.SubscriptionId) + azureMonitorService, err := NewService(config) if err != nil { return nil, err } diff --git a/x-pack/metricbeat/module/azure/config.go b/x-pack/metricbeat/module/azure/config.go index 00f1af56126..3748e89abec 100644 --- a/x-pack/metricbeat/module/azure/config.go +++ b/x-pack/metricbeat/module/azure/config.go @@ -7,20 +7,38 @@ package azure import ( "time" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/pkg/errors" ) +const ( + // DefaultBaseURI is the default URI used for the service Insights + DefaultBaseURI = "https://management.azure.com/" +) + +var ( + AzureEnvs = common.MapStr{ + "https://management.azure.com/": "https://login.microsoftonline.com/", + "https://management.usgovcloudapi.net/": "https://login.microsoftonline.us/", + "https://management.chinacloudapi.cn/": "https://login.chinacloudapi.cn/", + "https://management.microsoftazure.de/": "https://login.microsoftonline.de/", + } +) + // Config options type Config struct { - ClientId string `config:"client_id"` - ClientSecret string `config:"client_secret"` - TenantId string `config:"tenant_id"` - SubscriptionId string `config:"subscription_id"` - Period time.Duration `config:"period" validate:"nonzero,required"` - Resources []ResourceConfig `config:"resources"` - RefreshListInterval time.Duration `config:"refresh_list_interval"` - DefaultResourceType string `config:"default_resource_type"` - AddCloudMetadata bool `config:"add_cloud_metadata"` + ClientId string `config:"client_id" validate:"required"` + ClientSecret string `config:"client_secret" validate:"required"` + TenantId string `config:"tenant_id" validate:"required"` + SubscriptionId string `config:"subscription_id" validate:"required"` + Period time.Duration `config:"period" validate:"nonzero,required"` + Resources []ResourceConfig `config:"resources"` + RefreshListInterval time.Duration `config:"refresh_list_interval"` + DefaultResourceType string `config:"default_resource_type"` + AddCloudMetadata bool `config:"add_cloud_metadata"` + ResourceManagerEndpoint string `config:"resource_manager_endpoint"` + ActiveDirectoryEndpoint string `config:"active_directory_endpoint"` } // ResourceConfig contains 
resource and metric list specific configuration. @@ -52,17 +70,24 @@ type DimensionConfig struct { } func (conf *Config) Validate() error { - if conf.SubscriptionId == "" { - return errors.New("no subscription ID has been configured") - } - if conf.ClientSecret == "" { - return errors.New("no client secret has been configured") - } - if conf.ClientId == "" { - return errors.New("no client ID has been configured") + if conf.ResourceManagerEndpoint == "" { + conf.ResourceManagerEndpoint = DefaultBaseURI } - if conf.TenantId == "" { - return errors.New("no tenant ID has been configured") + if conf.ActiveDirectoryEndpoint == "" { + ok, err := AzureEnvs.HasKey(conf.ResourceManagerEndpoint) + if err != nil { + return errors.Wrap(err, "No active directory endpoint found for the resource manager endpoint selected.") + } + if ok { + add, err := AzureEnvs.GetValue(conf.ResourceManagerEndpoint) + if err != nil { + return errors.Wrap(err, "No active directory endpoint found for the resource manager endpoint selected.") + } + conf.ActiveDirectoryEndpoint = add.(string) + } + if conf.ActiveDirectoryEndpoint == "" { + return errors.New("no active directory endpoint has been configured") + } } return nil } diff --git a/x-pack/metricbeat/module/azure/monitor_service.go b/x-pack/metricbeat/module/azure/monitor_service.go index c3ed4e2fa43..bb77712b49e 100644 --- a/x-pack/metricbeat/module/azure/monitor_service.go +++ b/x-pack/metricbeat/module/azure/monitor_service.go @@ -29,16 +29,18 @@ type MonitorService struct { const metricNameLimit = 20 // NewService instantiates the Azure monitoring service -func NewService(clientId string, clientSecret string, tenantId string, subscriptionId string) (*MonitorService, error) { - clientConfig := auth.NewClientCredentialsConfig(clientId, clientSecret, tenantId) +func NewService(config Config) (*MonitorService, error) { + clientConfig := auth.NewClientCredentialsConfig(config.ClientId, config.ClientSecret, config.TenantId) + clientConfig.AADEndpoint = config.ActiveDirectoryEndpoint + clientConfig.Resource = config.ResourceManagerEndpoint authorizer, err := clientConfig.Authorizer() if err != nil { return nil, err } - metricsClient := insights.NewMetricsClient(subscriptionId) - metricsDefinitionClient := insights.NewMetricDefinitionsClient(subscriptionId) - resourceClient := resources.NewClient(subscriptionId) - metricNamespaceClient := insights.NewMetricNamespacesClient(subscriptionId) + metricsClient := insights.NewMetricsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) + metricsDefinitionClient := insights.NewMetricDefinitionsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) + resourceClient := resources.NewClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) + metricNamespaceClient := insights.NewMetricNamespacesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionId) metricsClient.Authorizer = authorizer metricsDefinitionClient.Authorizer = authorizer resourceClient.Authorizer = authorizer From d8e475543a7a3077a05f1bf99883d86930017872 Mon Sep 17 00:00:00 2001 From: kaiyan-sheng Date: Thu, 29 Oct 2020 07:19:43 -0600 Subject: [PATCH 90/93] Fix awscloudwatch input documentation (#22247) --- x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc b/x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc index 415721b54f0..3553947fb87 100644 --- 
a/x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc @@ -88,7 +88,7 @@ will sleep for 1 minute before querying for new logs again. ==== `api_timeout` The maximum duration of AWS API can take. If it exceeds the timeout, AWS API will be interrupted. The default AWS API timeout for a message is 120 seconds. -The minimum is 0 seconds. The maximum is half of the visibility timeout value. +The minimum is 0 seconds. [float] ==== `api_sleep` From 82752daf99c0161956c5a466bae7975f420325ef Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 29 Oct 2020 14:53:02 +0000 Subject: [PATCH 91/93] [CI] enable x-pack/packetbeat in the CI (#22252) --- Jenkinsfile.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile.yml b/Jenkinsfile.yml index 3edd9a75d5c..22fd31ae4b1 100644 --- a/Jenkinsfile.yml +++ b/Jenkinsfile.yml @@ -16,11 +16,11 @@ projects: - "x-pack/functionbeat" - "x-pack/libbeat" - "x-pack/metricbeat" + - "x-pack/packetbeat" - "x-pack/winlogbeat" - "dev-tools" ##- "x-pack/heartbeat" It's not yet in the 1.0 pipeline. ##- "x-pack/journalbeat" It's not yet in the 1.0 pipeline. - ##- "x-pack/packetbeat" It's not yet in the 1.0 pipeline. ## Changeset macros that are defined here and used in each specific 2.0 pipeline. changeset: From 231f5adc89c83f07224b495efd31af2da53a8fd1 Mon Sep 17 00:00:00 2001 From: Mariana Dima Date: Thu, 29 Oct 2020 15:07:48 +0000 Subject: [PATCH 92/93] Add interval documentation to `monitor` metricset (#22152) * mofidy doc * update doc * changelog * space * updaet * spelling --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/azure.asciidoc | 2 +- x-pack/metricbeat/module/azure/_meta/docs.asciidoc | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index db784002f81..815b8d809f1 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -385,6 +385,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - [Kubernetes] Remove redundant dockersock volume mount {pull}22009[22009] - Revert change to report `process.memory.rss` as `process.memory.wss` on Windows. {pull}22055[22055] - Add a switch to the driver definition on SQL module to use pretty names {pull}17378[17378] +- Add interval information to `monitor` metricset in azure. {pull}22152[22152] *Packetbeat* diff --git a/metricbeat/docs/modules/azure.asciidoc b/metricbeat/docs/modules/azure.asciidoc index 8ec43e4be43..08a14132e8f 100644 --- a/metricbeat/docs/modules/azure.asciidoc +++ b/metricbeat/docs/modules/azure.asciidoc @@ -97,7 +97,7 @@ https://login.microsoftonline.de for azure USGovernmentCloud [float] === `monitor` This metricset allows users to retrieve metrics from specified resources. Added filters can apply here as the interval of retrieving these metrics, metric names, -aggregation list, namespaces and metric dimensions. +aggregation list, namespaces and metric dimensions. The monitor metrics will have a minimum timegrain of 5 minutes, so the `period` for `monitor` metricset should be `300s` or multiples of `300s`. 
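
For reference, a module configuration that exercises the options documented in the two azure patches above could look like the sketch below. The option names (`period`, `resource_manager_endpoint`, `active_directory_endpoint`, `resources`) come from those patches; the ChinaCloud endpoint pair follows the mapping added in `config.go`, while the credentials, the resource query, and the metric names are illustrative placeholders only.

[source,yaml]
----
- module: azure
  metricsets: ["monitor"]
  period: 300s
  client_id: "<client id>"
  client_secret: "<client secret>"
  tenant_id: "<tenant id>"
  subscription_id: "<subscription id>"
  # Optional overrides for a non-public cloud; this pair targets azure ChinaCloud.
  resource_manager_endpoint: "https://management.chinacloudapi.cn"
  active_directory_endpoint: "https://login.chinacloudapi.cn"
  resources:
    - resource_query: "resourceType eq 'Microsoft.DocumentDb/databaseAccounts'"
      metrics:
        - name: ["DataUsage", "DocumentCount"]
          namespace: "Microsoft.DocumentDb/databaseAccounts"
----

Because the monitor metrics have a minimum timegrain of 5 minutes, `period` is set to `300s` here, in line with the documentation added above.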
[float] === `compute_vm` diff --git a/x-pack/metricbeat/module/azure/_meta/docs.asciidoc b/x-pack/metricbeat/module/azure/_meta/docs.asciidoc index 9065f2f283f..78e932a0153 100644 --- a/x-pack/metricbeat/module/azure/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/azure/_meta/docs.asciidoc @@ -89,7 +89,7 @@ https://login.microsoftonline.de for azure USGovernmentCloud [float] === `monitor` This metricset allows users to retrieve metrics from specified resources. Added filters can apply here as the interval of retrieving these metrics, metric names, -aggregation list, namespaces and metric dimensions. +aggregation list, namespaces and metric dimensions. The monitor metrics will have a minimum timegrain of 5 minutes, so the `period` for `monitor` metricset should be `300s` or multiples of `300s`. [float] === `compute_vm` From 633285e75a0316e20fabddc0fa590d24db4952b7 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 29 Oct 2020 09:03:58 -0700 Subject: [PATCH 93/93] Update commands used in the quick start (#22248) * Update commands used in the quick start * fix typo --- .../docs/elastic-agent-configuration.asciidoc | 4 +- .../docs/tab-widgets/enroll-widget.asciidoc | 60 +++++++++--------- .../docs/tab-widgets/enroll.asciidoc | 10 +-- .../docs/tab-widgets/install-widget.asciidoc | 61 ++++++++++--------- .../docs/tab-widgets/install.asciidoc | 10 +-- 5 files changed, 76 insertions(+), 69 deletions(-) diff --git a/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc b/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc index 98ba4a9b424..76130470218 100644 --- a/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc +++ b/x-pack/elastic-agent/docs/elastic-agent-configuration.asciidoc @@ -21,8 +21,8 @@ When running the agent standalone, specify configuration settings in the the `elastic-agent.yml` file. Instead, use {fleet} in {kib} to change settings. -TIP: To get started quickly, you can use {fleet} to generate a standalone -configuration. For more information, see <>. +//TIP: To get started quickly, you can use {fleet} to generate a standalone +//configuration. For more information, see <>. [discrete] [[elastic-agent-output-configuration]] diff --git a/x-pack/elastic-agent/docs/tab-widgets/enroll-widget.asciidoc b/x-pack/elastic-agent/docs/tab-widgets/enroll-widget.asciidoc index 202c38913db..c347e6d2319 100644 --- a/x-pack/elastic-agent/docs/tab-widgets/enroll-widget.asciidoc +++ b/x-pack/elastic-agent/docs/tab-widgets/enroll-widget.asciidoc @@ -3,18 +3,6 @@
- - + +
+ id="mac-tab-enroll" + aria-labelledby="mac-enroll"> ++++ -include::enroll.asciidoc[tag=deb] +include::enroll.asciidoc[tag=mac] ++++ -
+ + diff --git a/x-pack/elastic-agent/docs/tab-widgets/enroll.asciidoc b/x-pack/elastic-agent/docs/tab-widgets/enroll.asciidoc index 479e8cfba7c..70ece5b29ef 100644 --- a/x-pack/elastic-agent/docs/tab-widgets/enroll.asciidoc +++ b/x-pack/elastic-agent/docs/tab-widgets/enroll.asciidoc @@ -7,7 +7,7 @@ integrations require root privileges to collect sensitive data. // end::enroll-tip[] [source,shell] ---- -elastic-agent enroll KIBANA_URL ENROLLMENT_KEY +elastic-agent install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY ---- include::enroll.asciidoc[tag=where-description] @@ -19,7 +19,7 @@ include::enroll.asciidoc[tag=enroll-tip] [source,shell] ---- -elastic-agent enroll KIBANA_URL ENROLLMENT_KEY +elastic-agent install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY ---- include::enroll.asciidoc[tag=where-description] @@ -31,7 +31,7 @@ include::enroll.asciidoc[tag=enroll-tip] [source,shell] ---- -./elastic-agent enroll KIBANA_URL ENROLLMENT_KEY +./elastic-agent install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY ---- include::enroll.asciidoc[tag=where-description] @@ -43,7 +43,7 @@ include::enroll.asciidoc[tag=enroll-tip] [source,shell] ---- -./elastic-agent enroll KIBANA_URL ENROLLMENT_KEY +./elastic-agent install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY ---- include::enroll.asciidoc[tag=where-description] @@ -58,7 +58,7 @@ and run: [source,shell] ---- -.\elastic-agent.exe enroll KIBANA_URL ENROLLMENT_KEY +.\elastic-agent.exe install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY ---- include::enroll.asciidoc[tag=where-description] diff --git a/x-pack/elastic-agent/docs/tab-widgets/install-widget.asciidoc b/x-pack/elastic-agent/docs/tab-widgets/install-widget.asciidoc index c25fc7fadbe..4a5bd68c8d9 100644 --- a/x-pack/elastic-agent/docs/tab-widgets/install-widget.asciidoc +++ b/x-pack/elastic-agent/docs/tab-widgets/install-widget.asciidoc @@ -3,18 +3,6 @@
- - + +
+ id="mac-tab-install" + aria-labelledby="mac-install"> ++++ -include::install.asciidoc[tag=deb] +include::install.asciidoc[tag=mac] ++++ -
+
+
diff --git a/x-pack/elastic-agent/docs/tab-widgets/install.asciidoc b/x-pack/elastic-agent/docs/tab-widgets/install.asciidoc
index 824825f86df..9776155ceb2 100644
--- a/x-pack/elastic-agent/docs/tab-widgets/install.asciidoc
+++ b/x-pack/elastic-agent/docs/tab-widgets/install.asciidoc
@@ -7,6 +7,9 @@ endif::[]
 
 ifeval::["{release-state}"!="unreleased"]
 
+IMPORTANT: To simplify upgrading to future versions of {agent}, we recommend
+that you use the tarball distribution instead of the DEB distribution.
+
 ["source","sh",subs="attributes"]
 ----
 curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-{version}-amd64.deb
@@ -25,6 +28,9 @@ endif::[]
 
 ifeval::["{release-state}"!="unreleased"]
 
+IMPORTANT: To simplify upgrading to future versions of {agent}, we recommend
+that you use the tarball distribution instead of the RPM distribution.
+
 ["source","sh",subs="attributes"]
 ----
 curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-{version}-x86_64.rpm
@@ -66,10 +72,6 @@ curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-ag
 tar xzvf elastic-agent-{version}-linux-x86_64.tar.gz
 ----
 
-NOTE: We recommend that you use the DEB or RPM distribution, instead of the
-tarball, to ensure that {agent} restarts automatically if the system is
-rebooted.
-
 endif::[]
 
 // end::linux[]
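
Read together, the enroll and install widgets updated above describe a single flow for the Linux tarball distribution; a rough end-to-end sketch is shown below. The download URL and the `install -f` invocation are the ones used in the widgets, `KIBANA_URL` and `ENROLLMENT_KEY` remain placeholders, and the name of the extracted directory is assumed to match the archive name.

["source","sh",subs="attributes"]
----
# Download and unpack the tarball distribution (recommended above for easier upgrades).
curl -L -O https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-{version}-linux-x86_64.tar.gz
tar xzvf elastic-agent-{version}-linux-x86_64.tar.gz
cd elastic-agent-{version}-linux-x86_64   # assumed extraction directory
# Install Elastic Agent and enroll it in Fleet using the Kibana URL and enrollment token.
./elastic-agent install -f --kibana-url=KIBANA_URL --enrollment-token=ENROLLMENT_KEY
----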