diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 37ea49e3a6d95..167830d3ed8b3 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -56,7 +56,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 788960c76e150..0f2e70addd684 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -272,8 +272,8 @@ steps: env: BWC_VERSION: 8.14.3 - - label: "{{matrix.image}} / 8.15.4 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.4 + - label: "{{matrix.image}} / 8.15.5 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.15.5 timeout_in_minutes: 300 matrix: setup: @@ -286,7 +286,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.5 - label: "{{matrix.image}} / 8.16.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.16.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 7b6a6ea72fe83..f68f64332426c 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -287,8 +287,8 @@ steps: - signal_reason: agent_stop limit: 3 - - label: 8.15.4 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.4#bwcTest + - label: 8.15.5 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.15.5#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -297,7 +297,7 @@ steps: buildDirectory: /dev/shm/bk preemptible: true env: - BWC_VERSION: 8.15.4 + BWC_VERSION: 8.15.5 retry: automatic: - exit_status: "-1" @@ -429,7 +429,7 @@ steps: setup: ES_RUNTIME_JAVA: - openjdk21 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 @@ -471,7 +471,7 @@ steps: ES_RUNTIME_JAVA: - openjdk21 - openjdk23 - BWC_VERSION: ["8.15.4", "8.16.0", "8.17.0", "9.0.0"] + BWC_VERSION: ["8.15.5", "8.16.0", "8.17.0", "9.0.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 2e77631450825..b4a4460ff5a80 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -14,7 +14,7 @@ BWC_VERSION: - "8.12.2" - "8.13.4" - "8.14.3" - - "8.15.4" + - "8.15.5" - "8.16.0" - "8.17.0" - "9.0.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index c6edc709a8ceb..7dad55b653925 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "8.15.4" + - "8.15.5" - "8.16.0" - "8.17.0" - "9.0.0" diff --git a/docs/changelog/112250.yaml b/docs/changelog/112250.yaml deleted file mode 100644 index edbb5667d4b9d..0000000000000 --- a/docs/changelog/112250.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 112250 -summary: Do not exclude empty arrays or empty objects in source filtering -area: Search -type: bug -issues: [109668] diff --git a/docs/changelog/113723.yaml b/docs/changelog/113723.yaml deleted file mode 100644 index 2cbcf49102719..0000000000000 --- 
a/docs/changelog/113723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 113723 -summary: Fix max file size check to use `getMaxFileSize` -area: Infra/Core -type: bug -issues: - - 113705 diff --git a/docs/changelog/114407.yaml b/docs/changelog/114407.yaml deleted file mode 100644 index 4c1134a9d3834..0000000000000 --- a/docs/changelog/114407.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114407 -summary: Fix synthetic source handling for `bit` type in `dense_vector` field -area: Search -type: bug -issues: - - 114402 diff --git a/docs/changelog/114533.yaml b/docs/changelog/114533.yaml deleted file mode 100644 index f45589e8de921..0000000000000 --- a/docs/changelog/114533.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 114533 -summary: Fix dim validation for bit `element_type` -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/114601.yaml b/docs/changelog/114601.yaml deleted file mode 100644 index d2f563d62a639..0000000000000 --- a/docs/changelog/114601.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 114601 -summary: Support semantic_text in object fields -area: Vector Search -type: bug -issues: - - 114401 diff --git a/docs/changelog/114934.yaml b/docs/changelog/114934.yaml new file mode 100644 index 0000000000000..68628993b1c80 --- /dev/null +++ b/docs/changelog/114934.yaml @@ -0,0 +1,6 @@ +pr: 114934 +summary: "[ES|QL] To_DatePeriod and To_TimeDuration return better error messages on\ + \ `union_type` fields" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/115181.yaml b/docs/changelog/115181.yaml deleted file mode 100644 index 65f59d5ed0add..0000000000000 --- a/docs/changelog/115181.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115181 -summary: Always check the parent breaker with zero bytes in `PreallocatedCircuitBreakerService` -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml deleted file mode 100644 index 163f0232a3e58..0000000000000 --- a/docs/changelog/115308.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115308 -summary: "ESQL: Disable pushdown of WHERE past STATS" -area: ES|QL -type: bug -issues: - - 115281 diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml deleted file mode 100644 index c2903f7751012..0000000000000 --- a/docs/changelog/115430.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115430 -summary: Prevent NPE if model assignment is removed while waiting to start -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml deleted file mode 100644 index b20a8f765c084..0000000000000 --- a/docs/changelog/115459.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115459 -summary: Guard blob store local directory creation with `doPrivileged` -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/115510.yaml b/docs/changelog/115510.yaml deleted file mode 100644 index 1e71270e18f97..0000000000000 --- a/docs/changelog/115510.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 115510 -summary: Fix lingering license warning header in IP filter -area: License -type: bug -issues: - - 114865 diff --git a/docs/changelog/115834.yaml b/docs/changelog/115834.yaml deleted file mode 100644 index 91f9e9a4e2e41..0000000000000 --- a/docs/changelog/115834.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 115834 -summary: Try to simplify geometries that fail with `TopologyException` -area: Geo -type: bug -issues: [] diff --git a/docs/changelog/116031.yaml b/docs/changelog/116031.yaml deleted file mode 100644 index e30552bf3b513..0000000000000 --- a/docs/changelog/116031.yaml +++ /dev/null @@ -1,6 
+0,0 @@ -pr: 116031 -summary: Resolve pipelines from template on lazy rollover write -area: Data streams -type: bug -issues: - - 112781 diff --git a/docs/changelog/116219.yaml b/docs/changelog/116219.yaml deleted file mode 100644 index aeeea68570e77..0000000000000 --- a/docs/changelog/116219.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 116219 -summary: "[apm-data] Apply lazy rollover on index template creation" -area: Data streams -type: bug -issues: - - 116230 diff --git a/docs/changelog/116650.yaml b/docs/changelog/116650.yaml new file mode 100644 index 0000000000000..d314a918aede9 --- /dev/null +++ b/docs/changelog/116650.yaml @@ -0,0 +1,5 @@ +pr: 116650 +summary: Fix bug in ML autoscaling when some node info is unavailable +area: Machine Learning +type: bug +issues: [] diff --git a/docs/reference/connector/docs/_connectors-overview-table.asciidoc b/docs/reference/connector/docs/_connectors-overview-table.asciidoc index f25ea3deceeee..f5f8103349dde 100644 --- a/docs/reference/connector/docs/_connectors-overview-table.asciidoc +++ b/docs/reference/connector/docs/_connectors-overview-table.asciidoc @@ -44,7 +44,7 @@ NOTE: All connectors are available as self-managed <>|*GA*|8.12+|8.12+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/salesforce.py[View code] |<>|*GA*|8.10+|8.10+|8.11+|8.13+|8.13+|https://github.com/elastic/connectors/tree/main/connectors/sources/servicenow.py[View code] |<>|*GA*|8.9+|8.9+|8.9+|8.9+|8.9+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_online.py[View code] -|<>|*Beta*|8.15+|-|8.11+|8.13+|8.14+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] +|<>|*Beta*|8.15+|-|8.11+|8.13+|8.15+|https://github.com/elastic/connectors/tree/main/connectors/sources/sharepoint_server.py[View code] |<>|*Preview*|8.14+|-|-|-|-|https://github.com/elastic/connectors/tree/main/connectors/sources/slack.py[View code] |<>|*Preview*|8.14+|-|-|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/teams.py[View code] |<>|*Preview*|8.14+|-|8.11+|8.13+|-|https://github.com/elastic/connectors/tree/main/connectors/sources/zoom.py[View code] diff --git a/docs/reference/connector/docs/connectors-release-notes.asciidoc b/docs/reference/connector/docs/connectors-release-notes.asciidoc index 723671b049bf2..e1ed082365c00 100644 --- a/docs/reference/connector/docs/connectors-release-notes.asciidoc +++ b/docs/reference/connector/docs/connectors-release-notes.asciidoc @@ -4,7 +4,13 @@ Release notes ++++ -[INFO] +[NOTE] ==== -Prior to version 8.16.0, the connector release notes were published as part of the https://www.elastic.co/guide/en/enterprise-search/current/changelog.html[Enterprise Search documentation]. +Prior to version *8.16.0*, the connector release notes were published as part of the {enterprise-search-ref}/changelog.html[Enterprise Search documentation]. ==== + +*Release notes*: + +* <> + +include::release-notes/connectors-release-notes-8.16.0.asciidoc[] diff --git a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc index 95ff8223b4d20..21d0890e436c5 100644 --- a/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint-online.asciidoc @@ -87,14 +87,16 @@ Select an expiration date. 
(At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -138,7 +140,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions @@ -546,14 +548,16 @@ Select an expiration date. (At this expiration date, you will need to generate a + ``` Graph API -- Sites.Read.All +- Sites.Selected - Files.Read.All - Group.Read.All - User.Read.All Sharepoint -- Sites.Read.All +- Sites.Selected ``` +NOTE: If the `Comma-separated list of sites` configuration is set to `*` or if a user enables the toggle button `Enumerate all sites`, the connector requires `Sites.Read.All` permission. + * **Grant admin consent**, using the `Grant Admin Consent` link from the permissions screen. * Save the tenant name (i.e. Domain name) of Azure platform. @@ -597,7 +601,7 @@ Refer to https://learn.microsoft.com/en-us/sharepoint/dev/general-development/ho Here's a summary of why we use these Graph API permissions: -* *Sites.Read.All* is used to fetch the sites and their metadata +* *Sites.Selected* is used to fetch the sites and their metadata * *Files.Read.All* is used to fetch Site Drives and files in these drives * *Groups.Read.All* is used to fetch groups for document-level permissions * *User.Read.All* is used to fetch user information for document-level permissions diff --git a/docs/reference/connector/docs/connectors-sharepoint.asciidoc b/docs/reference/connector/docs/connectors-sharepoint.asciidoc index f5590daa1e701..d7a2307a9db80 100644 --- a/docs/reference/connector/docs/connectors-sharepoint.asciidoc +++ b/docs/reference/connector/docs/connectors-sharepoint.asciidoc @@ -67,6 +67,9 @@ The following SharePoint Server versions are compatible: The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -133,7 +136,7 @@ The connector syncs the following SharePoint object types: [NOTE] ==== * Content from files bigger than 10 MB won't be extracted by default. Use the <> to handle larger binary files. -* Permissions are not synced. **All documents** indexed to an Elastic deployment will be visible to **all users with access** to that Elasticsearch Index. +* Permissions are not synced by default. Enable <> to sync permissions. 
==== [discrete#es-connectors-sharepoint-sync-types] @@ -191,7 +194,7 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== @@ -254,6 +257,9 @@ Once connected, you'll be able to update these values in Kibana. The following configuration fields are required to set up the connector: +`authentication`:: +Authentication mode, either *Basic* or *NTLM*. + `username`:: The username of the account for the SharePoint Server instance. @@ -408,5 +414,5 @@ This connector is written in Python using the {connectors-python}[Elastic connec View the {connectors-python}/connectors/sources/sharepoint_server.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== diff --git a/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc new file mode 100644 index 0000000000000..7608336073176 --- /dev/null +++ b/docs/reference/connector/docs/release-notes/connectors-release-notes-8.16.0.asciidoc @@ -0,0 +1,53 @@ +[[es-connectors-release-notes-8-16-0]] +=== 8.16.0 connectors release notes + +[discrete] +[[es-connectors-release-notes-deprecation-notice]] +==== Deprecation notices + +* *Direct index access for connectors and sync jobs* ++ +IMPORTANT: Directly accessing connector and sync job state through `.elastic-connectors*` indices is deprecated, and will be disallowed entirely in a future release. + +* Instead, the Elasticsearch Connector APIs should be used. Connectors framework code now uses the <> by default. +See https://github.com/elastic/connectors/pull/2884[*PR 2884*]. + +* *Docker `enterprise-search` namespace deprecation* ++ +IMPORTANT: The `enterprise-search` Docker namespace is deprecated and will be discontinued in a future release. ++ +Starting in `8.16.0`, Docker images are being transitioned to the new `integrations` namespace, which will become the sole location for future releases. This affects the https://github.com/elastic/connectors[Elastic Connectors] and https://github.com/elastic/data-extraction-service[Elastic Data Extraction Service]. ++ +During this transition period, images are published to both namespaces: ++ +** *Example*: ++ +Deprecated namespace:: +`docker.elastic.co/enterprise-search/elastic-connectors:v8.16.0` ++ +New namespace:: +`docker.elastic.co/integrations/elastic-connectors:v8.16.0` ++ +Users should migrate to the new `integrations` namespace as soon as possible to ensure continued access to future releases. + +[discrete] +[[es-connectors-release-notes-8-16-0-enhancements]] +==== Enhancements + +* Docker images now use Chainguard's Wolfi base image (`docker.elastic.co/wolfi/jdk:openjdk-11-dev`), replacing the previous `ubuntu:focal` base. + +* The SharePoint Online connector now works with the `Sites.Selected` permission instead of the broader permission `Sites.Read.All`. +See https://github.com/elastic/connectors/pull/2762[*PR 2762*]. + +* Starting in 8.16.0, connectors will start using proper SEMVER, with `MAJOR.MINOR.PATCH`, which aligns with Elasticsearch/Kibana versions.
This drops the previous `.BUILD` suffix, which we used to release connectors between Elastic stack releases. Going forward, these inter-stack-release builds will instead be suffixed with `+`, aligning with Elastic Agent and conforming to SEMVER. +See https://github.com/elastic/connectors/pull/2749[*PR 2749*]. + +* Connector logs now use UTC timestamps, instead of machine-local timestamps. This only impacts logging output. +See https://github.com/elastic/connectors/pull/2695[*PR 2695*]. + +[discrete] +[[es-connectors-release-notes-8-16-0-bug-fixes]] +==== Bug fixes + +* The Dropbox connector now fetches the files from team shared folders. +See https://github.com/elastic/connectors/pull/2718[*PR 2718*]. \ No newline at end of file diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index e884c01dd3509..60c32cabdb5c1 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -95,13 +95,20 @@ and https://elasticsearch-benchmarks.elastic.co/#tracks/dense_vector[here] some of datasets and configurations that we use for our nightly benchmarks. [discrete] +[[dense-vector-preloading]] include::search-speed.asciidoc[tag=warm-fs-cache] The following file extensions are used for the approximate kNN search: +Each extension is broken down by quantization type. -* `vec` and `veq` for vector values -* `vex` for HNSW graph -* `vem`, `vemf`, and `vemq` for metadata +* `vex` for the HNSW graph +* `vec` for all non-quantized vector values. This includes all element types: `float`, `byte`, and `bit`. +* `veq` for quantized vectors indexed with <>: `int4` or `int8` +* `veb` for binary vectors indexed with <>: `bbq` +* `vem`, `vemf`, `vemq`, and `vemb` for metadata, usually small and not a concern for preloading + +Generally, if you are using a quantized index, you should only preload the relevant quantized values and the HNSW graph. +Preloading the raw vectors is not necessary and might be counterproductive. [discrete] === Reduce the number of index segments diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index 9b30ba9dbde35..aba0850c76437 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -143,8 +143,8 @@ terms dictionaries, postings lists and points, which are the most important parts of the index for search and aggregations. For vector search, you use <>, -you might want to set the setting to vector search files: `["vec", "vex", "vem"]` -("vec" is used for vector values, "vex" – for HNSW graph, "vem" – for metadata). +you might want to set the setting to vector search files. See <> for a detailed +list of the files. Note that this setting can be dangerous on indices that are larger than the size of the main memory of the host, as it would cause the filesystem cache to be diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 8f7cef06d12a0..f4fcc0fc84d0d 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -77,7 +77,10 @@ Computes a hash of the document’s content. Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. <>:: -Adds information about the geographical location of an IPv4 or IPv6 address. +Adds information about the geographical location of an IPv4 or IPv6 address from a MaxMind database.
+ +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address from an ip geolocation database. <>:: Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. @@ -245,6 +248,7 @@ include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] include::processors/html_strip.asciidoc[] include::processors/inference.asciidoc[] +include::processors/ip-location.asciidoc[] include::processors/join.asciidoc[] include::processors/json.asciidoc[] include::processors/kv.asciidoc[] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 2eff56f87e826..78ebe3f5b5ee3 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -13,7 +13,7 @@ ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[ CC BY-SA 4.0 license. It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: * `ingest.geoip.downloader.eager.download` is set to true -* your cluster has at least one pipeline with a `geoip` processor +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor {es} automatically downloads updates for these databases from the Elastic GeoIP endpoint: @@ -25,10 +25,10 @@ If your cluster can't connect to the Elastic GeoIP endpoint or you want to manage your own updates, see <>. If you would like to have {es} download database files directly from Maxmind using your own provided -license key, see <>. +license key, see <>. If {es} can't connect to the endpoint for 30 days all updated databases will become -invalid. {es} will stop enriching documents with geoip data and will add `tags: ["_geoip_expired_database"]` +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_geoip_expired_database"]` field instead. [[using-ingest-geoip]] @@ -40,11 +40,11 @@ field instead. |====== | Name | Required | Default | Description | `field` | yes | - | The field to get the IP address from for the geographical lookup. -| `target_field` | no | geoip | The field that will hold the geographical information looked up from the MaxMind database. -| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). -| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the geoip lookup. +| `target_field` | no | geoip | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. 
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document -| `first_only` | no | `true` | If `true` only first found geoip data will be returned, even if `field` contains array +| `first_only` | no | `true` | If `true`, only the first found ip geolocation data will be returned, even if `field` contains an array | `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered by when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. |====== @@ -79,15 +79,13 @@ depend on what has been found and which properties were configured in `properties` `residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and `connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. -preview::["Do not use the GeoIP2 Anonymous IP, GeoIP2 Connection Type, GeoIP2 Domain, GeoIP2 ISP, and GeoIP2 Enterprise databases in production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] - Here is an example that uses the default city database and adds the geographical information to the `geoip` field based on the `ip` field: [source,console] -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -138,7 +136,7 @@ this database is downloaded automatically. So this: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -190,7 +188,7 @@ cannot be found: -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -256,7 +254,7 @@ PUT my_ip_locations -------------------------------------------------- PUT _ingest/pipeline/geoip { - "description" : "Add geoip info", + "description" : "Add ip geolocation info", "processors" : [ { "geoip" : { @@ -429,7 +427,7 @@ The `geoip` processor supports the following setting: The maximum number of results that should be cached. Defaults to `1000`. -Note that these settings are node settings and apply to all `geoip` processors, i.e. there is one cache for all defined `geoip` processors. +Note that these settings are node settings and apply to all `geoip` and `ip_location` processors, i.e. there is a single cache for all such processors. [[geoip-cluster-settings]] ===== Cluster settings @@ -458,7 +456,7 @@ each node's <> at `$ES_TMPDIR/geoip-databases/<> diff --git a/docs/reference/ingest/processors/ip-location.asciidoc b/docs/reference/ingest/processors/ip-location.asciidoc new file mode 100644 --- /dev/null +++ b/docs/reference/ingest/processors/ip-location.asciidoc +++++ +IP Location +++++ + +The `ip_location` processor adds information about the geographical location of an +IPv4 or IPv6 address. + +[[ip-location-automatic-updates]] +By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 +ASN IP geolocation databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the +CC BY-SA 4.0 license.
It automatically downloads these databases if your nodes can connect to `storage.googleapis.com` domain and either: + +* `ingest.geoip.downloader.eager.download` is set to true +* your cluster has at least one pipeline with a `geoip` or `ip_location` processor + +{es} automatically downloads updates for these databases from the Elastic GeoIP +endpoint: +https://geoip.elastic.co/v1/database?elastic_geoip_service_tos=agree[https://geoip.elastic.co/v1/database]. +To get download statistics for these updates, use the <>. + +If your cluster can't connect to the Elastic GeoIP endpoint or you want to +manage your own updates, see <>. + +If you would like to have {es} download database files directly from MaxMind using your own provided +license key, see <>. + +If {es} can't connect to the endpoint for 30 days, all updated databases will become +invalid. {es} will stop enriching documents with ip geolocation data and will add `tags: ["_ip_location_expired_database"]` +field instead. + +[[using-ingest-ip-location]] +==== Using the `ip_location` Processor in a Pipeline + +[[ingest-ip-location-options]] +.`ip_location` options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to get the IP address from for the geographical lookup. +| `target_field` | no | ip_location | The field that will hold the geographical information looked up from the database. +| `database_file` | no | GeoLite2-City.mmdb | The database filename referring to one of the automatically downloaded GeoLite2 databases (GeoLite2-City.mmdb, GeoLite2-Country.mmdb, or GeoLite2-ASN.mmdb), or the name of a supported database file in the `ingest-geoip` config directory, or the name of a <> (with the `.mmdb` suffix appended). +| `properties` | no | [`continent_name`, `country_iso_code`, `country_name`, `region_iso_code`, `region_name`, `city_name`, `location`] * | Controls what properties are added to the `target_field` based on the ip geolocation lookup. +| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `first_only` | no | `true` | If `true`, only the first found ip geolocation data will be returned, even if `field` contains an array +| `download_database_on_pipeline_creation` | no | `true` | If `true` (and if `ingest.geoip.downloader.eager.download` is `false`), the missing database is downloaded when the pipeline is created. Else, the download is triggered when the pipeline is used as the `default_pipeline` or `final_pipeline` in an index. +|====== + +*Depends on what is available in `database_file`: + +* If a GeoLite2 City or GeoIP2 City database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, and `accuracy_radius`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If a GeoLite2 Country or GeoIP2 Country database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, and `continent_name`.
The fields actually added depend on what has been found +and which properties were configured in `properties`. +* If the GeoLite2 ASN database is used, then the following fields may be added under the `target_field`: `ip`, +`asn`, `organization_name` and `network`. The fields actually added depend on what has been found and which properties were configured +in `properties`. +* If the GeoIP2 Anonymous IP database is used, then the following fields may be added under the `target_field`: `ip`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, and `residential_proxy`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Connection Type database is used, then the following fields may be added under the `target_field`: `ip`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Domain database is used, then the following fields may be added under the `target_field`: `ip`, and `domain`. +The fields actually added depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 ISP database is used, then the following fields may be added under the `target_field`: `ip`, `asn`, +`organization_name`, `network`, `isp`, `isp_organization_name`, `mobile_country_code`, and `mobile_network_code`. The fields actually added +depend on what has been found and which properties were configured in `properties`. +* If the GeoIP2 Enterprise database is used, then the following fields may be added under the `target_field`: `ip`, +`country_iso_code`, `country_name`, `country_in_european_union`, `registered_country_iso_code`, `registered_country_name`, `registered_country_in_european_union`, +`continent_code`, `continent_name`, `region_iso_code`, `region_name`, `city_name`, `postal_code`, `timezone`, +`location`, `accuracy_radius`, `country_confidence`, `city_confidence`, `postal_confidence`, `asn`, `organization_name`, `network`, +`hosting_provider`, `tor_exit_node`, `anonymous_vpn`, `anonymous`, `public_proxy`, +`residential_proxy`, `domain`, `isp`, `isp_organization_name`, `mobile_country_code`, `mobile_network_code`, `user_type`, and +`connection_type`. The fields actually added depend on what has been found and which properties were configured in `properties`. 
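+The `properties` option above controls which of these fields are emitted. As a minimal, hypothetical sketch (the pipeline name `ip_location_asn` and the two-field restriction are illustrative, not part of this change), a lookup limited to ASN data might look like:
+
+[source,console]
+--------------------------------------------------
+PUT _ingest/pipeline/ip_location_asn
+{
+  "description" : "Add only ASN info",
+  "processors" : [
+    {
+      "ip_location" : {
+        "field" : "ip",
+        "database_file" : "GeoLite2-ASN.mmdb",
+        "properties" : ["asn", "organization_name"]
+      }
+    }
+  ]
+}
+--------------------------------------------------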
+ +Here is an example that uses the default city database and adds the geographical information to the `ip_location` field based on the `ip` field: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 55, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "ip_location": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE", + "city_name" : "Linköping", + "region_iso_code" : "SE-E", + "region_name" : "Östergötland County", + "location": { "lat": 58.4167, "lon": 15.6167 } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term":1/"_primary_term" : $body._primary_term/] + +Here is an example that uses the default country database and adds the +geographical information to the `geo` field based on the `ip` field. Note that +this database is downloaded automatically. So this: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip", + "target_field" : "geo", + "database_file" : "GeoLite2-Country.mmdb" + } + } + ] +} +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "89.160.20.128" +} +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +returns this: + +[source,console-result] +-------------------------------------------------- +{ + "found": true, + "_index": "my-index-000001", + "_id": "my_id", + "_version": 1, + "_seq_no": 65, + "_primary_term": 1, + "_source": { + "ip": "89.160.20.128", + "geo": { + "continent_name": "Europe", + "country_name": "Sweden", + "country_iso_code": "SE" + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] + + +Not all IP addresses find geo information in the database. When this +occurs, no `target_field` is inserted into the document.
+ +Here is an example of what documents will be indexed as when information for "80.231.5.0" +cannot be found: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/ip_location +{ + "description" : "Add ip geolocation info", + "processors" : [ + { + "ip_location" : { + "field" : "ip" + } + } + ] +} + +PUT my-index-000001/_doc/my_id?pipeline=ip_location +{ + "ip": "80.231.5.0" +} + +GET my-index-000001/_doc/my_id +-------------------------------------------------- + +Which returns: + +[source,console-result] +-------------------------------------------------- +{ + "_index" : "my-index-000001", + "_id" : "my_id", + "_version" : 1, + "_seq_no" : 71, + "_primary_term": 1, + "found" : true, + "_source" : { + "ip" : "80.231.5.0" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] diff --git a/docs/reference/rest-api/security/bulk-create-roles.asciidoc b/docs/reference/rest-api/security/bulk-create-roles.asciidoc index a198f49383907..37f49f2445770 100644 --- a/docs/reference/rest-api/security/bulk-create-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-create-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-put-role]] === Bulk create or update roles API -preview::[] ++++ Bulk create or update roles API ++++ @@ -103,7 +102,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. diff --git a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc index a782b5e37fcb9..b9978c89bef3a 100644 --- a/docs/reference/rest-api/security/bulk-delete-roles.asciidoc +++ b/docs/reference/rest-api/security/bulk-delete-roles.asciidoc @@ -1,7 +1,6 @@ [role="xpack"] [[security-api-bulk-delete-role]] === Bulk delete roles API -preview::[] ++++ Bulk delete roles API ++++ diff --git a/docs/reference/rest-api/security/create-roles.asciidoc b/docs/reference/rest-api/security/create-roles.asciidoc index a1ab892330e67..d23b9f06e2d87 100644 --- a/docs/reference/rest-api/security/create-roles.asciidoc +++ b/docs/reference/rest-api/security/create-roles.asciidoc @@ -105,7 +105,9 @@ They have no effect for remote clusters configured with the <> can be used to determine +which privileges are allowed per version. For more information, see <>. @@ -176,21 +178,29 @@ POST /_security/role/cli_or_drivers_minimal -------------------------------------------------- // end::sql-queries-permission[] -The following example configures a role with remote indices privileges on a remote cluster: +The following example configures a role with remote indices and remote cluster privileges for a remote cluster: [source,console] -------------------------------------------------- -POST /_security/role/role_with_remote_indices +POST /_security/role/only_remote_access_role { "remote_indices": [ { - "clusters": [ "my_remote" ], <1> + "clusters": ["my_remote"], <1> "names": ["logs*"], <2> "privileges": ["read", "read_cross_cluster", "view_index_metadata"] <3> } + ], + "remote_cluster": [ + { + "clusters": ["my_remote"], <1> + "privileges": ["monitor_stats"] <4> + } ] } -------------------------------------------------- -<1> The remote indices privileges apply to remote cluster with the alias `my_remote`. 
-<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster ( `my_remote`). +<1> The remote indices and remote cluster privileges apply to the remote cluster with the alias `my_remote`. +<2> Privileges are granted for indices matching pattern `logs*` on the remote cluster (`my_remote`). <3> The actual <> granted for `logs*` on `my_remote`. +<4> The actual <> granted for `my_remote`. +Note that only a subset of the cluster privileges are supported for remote clusters. diff --git a/docs/reference/security/authorization/managing-roles.asciidoc b/docs/reference/security/authorization/managing-roles.asciidoc index 535d70cbc5e9c..0c3f520605f07 100644 --- a/docs/reference/security/authorization/managing-roles.asciidoc +++ b/docs/reference/security/authorization/managing-roles.asciidoc @@ -249,12 +249,10 @@ The following describes the structure of a remote cluster permissions entry: <> and <>. This field is required. <2> The cluster level privileges for the remote cluster. The allowed values here are a subset of the -<>. This field is required. +<>. +The <> can be used to determine +which privileges are allowed here. This field is required. -The `monitor_enrich` privilege for remote clusters was introduced in version -8.15.0. Currently, this is the only privilege available for remote clusters and -is required to enable users to use the `ENRICH` keyword in ES|QL queries across -clusters. ==== Example diff --git a/docs/reference/security/authorization/privileges.asciidoc b/docs/reference/security/authorization/privileges.asciidoc index 747b1eef40441..3b69e5c1ba984 100644 --- a/docs/reference/security/authorization/privileges.asciidoc +++ b/docs/reference/security/authorization/privileges.asciidoc @@ -250,6 +250,11 @@ Privileges to list and view details on existing repositories and snapshots. + This privilege is not available in {serverless-full}. +`monitor_stats`:: +Privileges to list and view details of stats. ++ +This privilege is not available in {serverless-full}. + `monitor_text_structure`:: All read-only operations related to the <>.
+ diff --git a/muted-tests.yml b/muted-tests.yml index 505b035ff477f..5fbf32752d7fb 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,9 +17,6 @@ tests: - class: org.elasticsearch.smoketest.WatcherYamlRestIT method: test {p0=watcher/usage/10_basic/Test watcher usage stats output} issue: https://github.com/elastic/elasticsearch/issues/112189 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_processor/Test create processor with missing mandatory fields} - issue: https://github.com/elastic/elasticsearch/issues/112191 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT method: testRejection issue: https://github.com/elastic/elasticsearch/issues/112406 @@ -142,9 +139,6 @@ tests: - class: org.elasticsearch.search.SearchServiceTests method: testParseSourceValidation issue: https://github.com/elastic/elasticsearch/issues/115936 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/115970 - class: org.elasticsearch.index.reindex.ReindexNodeShutdownIT method: testReindexWithShutdown issue: https://github.com/elastic/elasticsearch/issues/115996 @@ -168,48 +162,27 @@ tests: - class: org.elasticsearch.xpack.ml.integration.DatafeedJobsRestIT method: testLookbackWithIndicesOptions issue: https://github.com/elastic/elasticsearch/issues/116127 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116133 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize ASYNC} issue: https://github.com/elastic/elasticsearch/issues/113055 -- class: org.elasticsearch.xpack.inference.InferenceRestIT - method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} - issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116136 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=transform/transforms_start_stop/Test start already started transform} issue: https://github.com/elastic/elasticsearch/issues/98802 - class: org.elasticsearch.action.search.SearchPhaseControllerTests method: testProgressListener issue: https://github.com/elastic/elasticsearch/issues/116149 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/forecast/Test forecast unknown job} - issue: https://github.com/elastic/elasticsearch/issues/116150 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=terms_enum/10_basic/Test security} issue: https://github.com/elastic/elasticsearch/issues/116178 - class: org.elasticsearch.search.basic.SearchWithRandomDisconnectsIT method: testSearchWithRandomDisconnects issue: https://github.com/elastic/elasticsearch/issues/116175 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/start_stop_datafeed/Test start datafeed given index pattern with no matching indices} - issue: https://github.com/elastic/elasticsearch/issues/116220 
- class: org.elasticsearch.search.basic.SearchWhileRelocatingIT method: testSearchAndRelocateConcurrentlyRandomReplicas issue: https://github.com/elastic/elasticsearch/issues/116145 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/filter_crud/Test update filter} - issue: https://github.com/elastic/elasticsearch/issues/116271 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/get_datafeeds/Test explicit get all datafeeds} - issue: https://github.com/elastic/elasticsearch/issues/116284 - class: org.elasticsearch.xpack.deprecation.DeprecationHttpIT method: testDeprecatedSettingsReturnWarnings issue: https://github.com/elastic/elasticsearch/issues/108628 @@ -231,24 +204,9 @@ tests: - class: org.elasticsearch.threadpool.SimpleThreadPoolIT method: testThreadPoolMetrics issue: https://github.com/elastic/elasticsearch/issues/108320 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/jobs_crud/Test put job deprecated bucket span} - issue: https://github.com/elastic/elasticsearch/issues/116419 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/explain_data_frame_analytics/Test both job id and body} - issue: https://github.com/elastic/elasticsearch/issues/116433 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/inference_crud/Test force delete given model with alias referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116443 - class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT method: testILMDownsampleRollingRestart issue: https://github.com/elastic/elasticsearch/issues/114233 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/data_frame_analytics_crud/Test put config with unknown field in outlier detection analysis} - issue: https://github.com/elastic/elasticsearch/issues/116458 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/evaluate_data_frame/Test outlier_detection with query} - issue: https://github.com/elastic/elasticsearch/issues/116484 - class: org.elasticsearch.xpack.kql.query.KqlQueryBuilderTests issue: https://github.com/elastic/elasticsearch/issues/116487 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests @@ -260,15 +218,24 @@ tests: - class: org.elasticsearch.xpack.logsdb.qa.StandardVersusLogsIndexModeRandomDataDynamicMappingChallengeRestIT method: testMatchAllQuery issue: https://github.com/elastic/elasticsearch/issues/116536 -- class: org.elasticsearch.xpack.test.rest.XPackRestIT - method: test {p0=ml/inference_crud/Test force delete given model referenced by pipeline} - issue: https://github.com/elastic/elasticsearch/issues/116555 -- class: org.elasticsearch.smoketest.MlWithSecurityIT - method: test {yaml=ml/data_frame_analytics_crud/Test delete given stopped config} - issue: https://github.com/elastic/elasticsearch/issues/116608 - class: org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT method: test {categorize.Categorize} issue: https://github.com/elastic/elasticsearch/issues/116434 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testBWCSearchStates + issue: https://github.com/elastic/elasticsearch/issues/116617 +- class: org.elasticsearch.upgrades.SearchStatesIT + method: testCanMatch + issue: https://github.com/elastic/elasticsearch/issues/116618 +- class: org.elasticsearch.packaging.test.ArchiveGenerateInitialCredentialsTests + method: test20NoAutoGenerationWhenAutoConfigurationDisabled + issue: 
https://github.com/elastic/elasticsearch/issues/116619 +- class: org.elasticsearch.packaging.test.BootstrapCheckTests + method: test20RunWithBootstrapChecks + issue: https://github.com/elastic/elasticsearch/issues/116620 +- class: org.elasticsearch.packaging.test.DockerTests + method: test011SecurityEnabledStatus + issue: https://github.com/elastic/elasticsearch/issues/116628 # Examples: # diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5e4df05c10182..909d733fd3719 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -187,6 +187,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_15_2 = new Version(8_15_02_99); public static final Version V_8_15_3 = new Version(8_15_03_99); public static final Version V_8_15_4 = new Version(8_15_04_99); + public static final Version V_8_15_5 = new Version(8_15_05_99); public static final Version V_8_16_0 = new Version(8_16_00_99); public static final Version V_8_17_0 = new Version(8_17_00_99); public static final Version V_9_0_0 = new Version(9_00_00_99); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index afbfe129c302e..2927c394da3d4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -130,6 +130,14 @@ public SearchRequestBuilder setWaitForCheckpoints(Map waitForChe return this; } + /** + * Set the timeout for the {@link #setWaitForCheckpoints(Map)} request. + */ + public SearchRequestBuilder setWaitForCheckpointsTimeout(final TimeValue waitForCheckpointsTimeout) { + request.setWaitForCheckpointsTimeout(waitForCheckpointsTimeout); + return this; + } + /** * Specifies what type of requested indices to ignore and wildcard indices expressions. *

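For context, a minimal usage sketch of the new `setWaitForCheckpointsTimeout` setter follows (hypothetical caller code, not part of this diff; imports and client setup are elided, and `client` is assumed to be an initialized `Client`):

[source,java]
--------------------------------------------------
// Wait up to 30 seconds for checkpoint 42 on "my-index" before searching.
SearchRequestBuilder builder = client.prepareSearch("my-index")
    .setWaitForCheckpoints(Map.of("my-index", new long[] { 42L }))
    .setWaitForCheckpointsTimeout(TimeValue.timeValueSeconds(30));
SearchResponse response = builder.get();
--------------------------------------------------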
diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 51f52326907eb..a1e8eb25f4780 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -12,6 +12,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.DelayableWriteable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -260,7 +261,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol this.formatSortValues = formatSortValues; } - public DateTime(StreamInput in) throws IOException { + private DateTime(StreamInput in) throws IOException { String formatterPattern = in.readString(); Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES) ? LocaleUtils.parse(in.readString()) @@ -285,6 +286,14 @@ public String getWriteableName() { return NAME; } + public static DateTime readFrom(StreamInput in) throws IOException { + final DateTime dateTime = new DateTime(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(dateTime); + } + return dateTime; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); @@ -528,7 +537,7 @@ public Decimal(String pattern) { this.format = new DecimalFormat(pattern, SYMBOLS); } - public Decimal(StreamInput in) throws IOException { + private Decimal(StreamInput in) throws IOException { this(in.readString()); } @@ -537,6 +546,14 @@ public String getWriteableName() { return NAME; } + public static Decimal readFrom(StreamInput in) throws IOException { + final Decimal decimal = new Decimal(in); + if (in instanceof DelayableWriteable.Deduplicator d) { + return d.deduplicate(decimal); + } + return decimal; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern); diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index fd39a95bdb75d..7a8b4e0cfe95a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -1013,8 +1013,8 @@ private void registerScoreFunction(ScoreFunctionSpec scoreFunction) { private void registerValueFormats() { registerValueFormat(DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN); - registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new); - registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new); + registerValueFormat(DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom); + registerValueFormat(DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom); registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 
38cab1761d409..b829afb0c23b0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -35,7 +35,6 @@ */ public abstract class InternalAggregation implements Aggregation, NamedWriteable { protected final String name; - protected final Map<String, Object> metadata; /** @@ -53,12 +52,14 @@ protected InternalAggregation(String name, Map<String, Object> metadata) { */ protected InternalAggregation(StreamInput in) throws IOException { final String name = in.readString(); + final Map<String, Object> metadata = in.readGenericMap(); if (in instanceof DelayableWriteable.Deduplicator d) { this.name = d.deduplicate(name); + this.metadata = metadata == null || metadata.isEmpty() ? metadata : d.deduplicate(metadata); } else { this.name = name; + this.metadata = metadata; } - metadata = in.readGenericMap(); } @Override diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv index b0ef5b780e775..26c518962c19a 100644 --- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv +++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv @@ -131,3 +131,4 @@ 8.15.1,8702002 8.15.2,8702003 8.15.3,8702003 +8.15.4,8702003 diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv index e3681cc975988..6cab0b513ee63 100644 --- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv +++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv @@ -131,3 +131,4 @@ 8.15.1,8512000 8.15.2,8512000 8.15.3,8512000 +8.15.4,8512000 diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index e81066a731d2e..7c9a68cbc91f1 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -43,8 +43,8 @@ public class DocValueFormatTests extends ESTestCase { public void testSerialization() throws Exception { List<Entry> entries = new ArrayList<>(); entries.add(new Entry(DocValueFormat.class, DocValueFormat.BOOLEAN.getWriteableName(), in -> DocValueFormat.BOOLEAN)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::new)); - entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::new)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.DateTime.NAME, DocValueFormat.DateTime::readFrom)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.Decimal.NAME, DocValueFormat.Decimal::readFrom)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOTILE.getWriteableName(), in -> DocValueFormat.GEOTILE)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java index 6f6224d505327..25d9509ecdc7a 100644 ---
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/integration/MlRestTestStateCleaner.java @@ -10,14 +10,15 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasSize; public class MlRestTestStateCleaner { @@ -30,24 +31,29 @@ public MlRestTestStateCleaner(Logger logger, RestClient adminClient) { } public void resetFeatures() throws IOException { - waitForMlStatsIndexToInitialize(); - deleteAllTrainedModelIngestPipelines(); + deletePipelinesWithInferenceProcessors(); // This resets all features, not just ML, but they should have been getting reset between tests anyway so it shouldn't matter adminClient.performRequest(new Request("POST", "/_features/_reset")); } @SuppressWarnings("unchecked") - private void deleteAllTrainedModelIngestPipelines() throws IOException { - final Request getAllTrainedModelStats = new Request("GET", "/_ml/trained_models/_stats"); - getAllTrainedModelStats.addParameter("size", "10000"); - final Response trainedModelsStatsResponse = adminClient.performRequest(getAllTrainedModelStats); + private void deletePipelinesWithInferenceProcessors() throws IOException { + final Response pipelinesResponse = adminClient.performRequest(new Request("GET", "/_ingest/pipeline")); + final Map pipelines = ESRestTestCase.entityAsMap(pipelinesResponse); + + var pipelinesWithInferenceProcessors = new HashSet(); + for (var entry : pipelines.entrySet()) { + var pipelineDef = (Map) entry.getValue(); // each top level object is a separate pipeline + var processors = (List>) pipelineDef.get("processors"); + for (var processor : processors) { + assertThat(processor.entrySet(), hasSize(1)); + if ("inference".equals(processor.keySet().iterator().next())) { + pipelinesWithInferenceProcessors.add(entry.getKey()); + } + } + } - final List> pipelines = (List>) XContentMapValues.extractValue( - "trained_model_stats.ingest.pipelines", - ESRestTestCase.entityAsMap(trainedModelsStatsResponse) - ); - Set pipelineIds = pipelines.stream().flatMap(m -> m.keySet().stream()).collect(Collectors.toSet()); - for (String pipelineId : pipelineIds) { + for (String pipelineId : pipelinesWithInferenceProcessors) { try { adminClient.performRequest(new Request("DELETE", "/_ingest/pipeline/" + pipelineId)); } catch (Exception ex) { @@ -55,12 +61,4 @@ private void deleteAllTrainedModelIngestPipelines() throws IOException { } } } - - private void waitForMlStatsIndexToInitialize() throws IOException { - ESRestTestCase.ensureHealth(adminClient, ".ml-stats-*", (request) -> { - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("level", "shards"); - request.addParameter("timeout", "30s"); - }); - } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index c80f26cda7b36..d13f3cda2a82c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ 
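The rewritten cleaner above no longer waits on the .ml-stats index or consults trained-model stats; it simply lists every ingest pipeline and deletes the ones that contain an inference processor. A self-contained sketch of that walk, with a hard-coded map standing in for the GET _ingest/pipeline response body:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class PipelineScan {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // GET _ingest/pipeline returns one top-level entry per pipeline:
        // { "<id>": { "processors": [ { "<processor-type>": { ... } } ], ... }, ... }
        Map<String, Object> pipelines = Map.of(
            "model-pipeline", Map.of("processors", List.of(Map.of("inference", Map.of("model_id", "m1")))),
            "plain-pipeline", Map.of("processors", List.of(Map.of("set", Map.of("field", "f", "value", "v"))))
        );

        Set<String> withInference = new HashSet<>();
        for (var entry : pipelines.entrySet()) {
            var pipelineDef = (Map<String, Object>) entry.getValue();
            var processors = (List<Map<String, Object>>) pipelineDef.get("processors");
            for (var processor : processors) {
                // Each processor object carries exactly one key: its type.
                if ("inference".equals(processor.keySet().iterator().next())) {
                    withInference.add(entry.getKey());
                }
            }
        }
        System.out.println(withInference); // [model-pipeline]; each id is then removed via DELETE /_ingest/pipeline/<id>
    }
}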
b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -90,7 +90,7 @@ private DeprecationChecks() {} NodeDeprecationChecks::checkWatcherBulkConcurrentRequestsSetting ); - static List> INDEX_SETTINGS_CHECKS = List.of( + static List> INDEX_SETTINGS_CHECKS = List.of( IndexDeprecationChecks::oldIndicesCheck, IndexDeprecationChecks::translogRetentionSettingCheck, IndexDeprecationChecks::checkIndexDataPath, diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index cd26e23394e81..87d0bfb93e18c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -274,7 +274,7 @@ public static DeprecationInfoAction.Response from( IndexNameExpressionResolver indexNameExpressionResolver, Request request, NodesDeprecationCheckResponse nodeDeprecationResponse, - List> indexSettingsChecks, + List> indexSettingsChecks, List> dataStreamChecks, List> clusterSettingsChecks, Map> pluginSettingIssues, @@ -293,7 +293,10 @@ public static DeprecationInfoAction.Response from( Map> indexSettingsIssues = new HashMap<>(); for (String concreteIndex : concreteIndexNames) { IndexMetadata indexMetadata = stateWithSkippedSettingsRemoved.getMetadata().index(concreteIndex); - List singleIndexIssues = filterChecks(indexSettingsChecks, c -> c.apply(indexMetadata)); + List singleIndexIssues = filterChecks( + indexSettingsChecks, + c -> c.apply(indexMetadata, stateWithSkippedSettingsRemoved) + ); if (singleIndexIssues.size() > 0) { indexSettingsIssues.put(concreteIndex, singleIndexIssues); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 3da32c7f5a4c2..8144d960df2e8 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.time.DateFormatter; @@ -30,14 +31,15 @@ */ public class IndexDeprecationChecks { - static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { + static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata, ClusterState clusterState) { // TODO: this check needs to be revised. It's trivially true right now. 
IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); - if (currentCompatibilityVersion.before(IndexVersions.V_7_0_0)) { + // We intentionally exclude indices that are in data streams because they will be picked up by DataStreamDeprecationChecks + if (currentCompatibilityVersion.before(IndexVersions.V_8_0_0) && isNotDataStreamIndex(indexMetadata, clusterState)) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + "breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + currentCompatibilityVersion.toReleaseVersion(), false, null @@ -46,7 +48,11 @@ static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata) { + private static boolean isNotDataStreamIndex(IndexMetadata indexMetadata, ClusterState clusterState) { + return clusterState.metadata().findDataStreams(indexMetadata.getIndex().getName()).isEmpty(); + } + + static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetadata.getSettings()); if (softDeletesEnabled) { if (IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(indexMetadata.getSettings()) @@ -73,7 +79,7 @@ static DeprecationIssue translogRetentionSettingCheck(IndexMetadata indexMetadat return null; } - static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { + static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata, ClusterState clusterState) { if (IndexMetadata.INDEX_DATA_PATH_SETTING.exists(indexMetadata.getSettings())) { final String message = String.format( Locale.ROOT, @@ -88,7 +94,7 @@ static DeprecationIssue checkIndexDataPath(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { final String storeType = IndexModule.INDEX_STORE_TYPE_SETTING.get(indexMetadata.getSettings()); if (IndexModule.Type.SIMPLEFS.match(storeType)) { return new DeprecationIssue( @@ -105,7 +111,7 @@ static DeprecationIssue storeTypeSettingCheck(IndexMetadata indexMetadata) { return null; } - static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata) { + static DeprecationIssue frozenIndexSettingCheck(IndexMetadata indexMetadata, ClusterState clusterState) { Boolean isIndexFrozen = FrozenEngine.INDEX_FROZEN.get(indexMetadata.getSettings()); if (Boolean.TRUE.equals(isIndexFrozen)) { String indexName = indexMetadata.getIndex().getName(); @@ -195,7 +201,7 @@ static List findInPropertiesRecursively( return issues; } - static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata) { + static DeprecationIssue deprecatedCamelCasePattern(IndexMetadata indexMetadata, ClusterState clusterState) { List fields = new ArrayList<>(); fieldLevelMappingIssue( indexMetadata, diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java index 
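Taken together, the deprecation hunks change every index check from a one-argument to a two-argument shape (assuming the production types read Function<IndexMetadata, DeprecationIssue> before and BiFunction<IndexMetadata, ClusterState, DeprecationIssue> after, which the two-argument lambdas in the test changes below bear out), and that extra ClusterState argument is what lets oldIndicesCheck consult the cluster's data streams. A toy sketch of the new shape, with stand-in records for the real metadata, state, and issue types:

import java.util.List;
import java.util.Set;
import java.util.function.BiFunction;

// Stand-ins for IndexMetadata, ClusterState and DeprecationIssue.
record IndexMeta(String name, int compatibilityMajor) {}
record State(Set<String> dataStreamBackingIndices) {}
record Issue(String message) {}

class IndexChecks {
    // Every check now receives the cluster state alongside the index metadata,
    // even checks that ignore it, so they all fit one functional shape.
    static final List<BiFunction<IndexMeta, State, Issue>> INDEX_SETTINGS_CHECKS = List.of(IndexChecks::oldIndicesCheck);

    static Issue oldIndicesCheck(IndexMeta index, State state) {
        // Backing indices of data streams are skipped here; the data-stream
        // deprecation checks report on them instead.
        boolean isDataStreamIndex = state.dataStreamBackingIndices().contains(index.name());
        if (index.compatibilityMajor() < 8 && isDataStreamIndex == false) {
            return new Issue("Old index with a compatibility version < 8.0");
        }
        return null;
    }
}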
5750daa8e3673..67950f3b9f623 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/DeprecationInfoActionResponseTests.java @@ -117,7 +117,9 @@ public void testFrom() throws IOException { boolean dataStreamIssueFound = randomBoolean(); DeprecationIssue foundIssue = createTestDeprecationIssue(); List> clusterSettingsChecks = List.of((s) -> clusterIssueFound ? foundIssue : null); - List> indexSettingsChecks = List.of((idx) -> indexIssueFound ? foundIssue : null); + List> indexSettingsChecks = List.of( + (idx, cs) -> indexIssueFound ? foundIssue : null + ); List> dataStreamChecks = List.of( (ds, cs) -> dataStreamIssueFound ? foundIssue : null ); @@ -211,7 +213,7 @@ public void testFromWithMergeableNodeIssues() throws IOException { DeprecationIssue foundIssue1 = createTestDeprecationIssue(metaMap1); DeprecationIssue foundIssue2 = createTestDeprecationIssue(foundIssue1, metaMap2); List> clusterSettingsChecks = Collections.emptyList(); - List> indexSettingsChecks = List.of((idx) -> null); + List> indexSettingsChecks = List.of((idx, cs) -> null); List> dataStreamChecks = List.of((ds, cs) -> null); NodesDeprecationCheckResponse nodeDeprecationIssues = new NodesDeprecationCheckResponse( @@ -276,10 +278,12 @@ public void testRemoveSkippedSettings() throws IOException { return null; })); AtomicReference visibleIndexSettings = new AtomicReference<>(); - List> indexSettingsChecks = Collections.unmodifiableList(Arrays.asList((idx) -> { - visibleIndexSettings.set(idx.getSettings()); - return null; - })); + List> indexSettingsChecks = Collections.unmodifiableList( + Arrays.asList((idx, cs) -> { + visibleIndexSettings.set(idx.getSettings()); + return null; + }) + ); AtomicInteger backingIndicesCount = new AtomicInteger(0); List> dataStreamChecks = Collections.unmodifiableList( Arrays.asList((ds, cs) -> { diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 18872d00d54a0..48cbef6831a2b 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -7,8 +7,15 @@ package org.elasticsearch.xpack.deprecation; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.cluster.metadata.DataStreamMetadata; +import org.elasticsearch.cluster.metadata.DataStreamOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -19,39 +26,89 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static 
org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - IndexVersion createdWith = IndexVersion.fromId(1000099); + IndexVersion createdWith = IndexVersion.fromId(7170099); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(settings(createdWith)) .numberOfShards(1) .numberOfReplicas(0) .build(); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata(Metadata.builder().put(indexMetadata, true)) + .build(); DeprecationIssue expected = new DeprecationIssue( DeprecationIssue.Level.CRITICAL, - "Old index with a compatibility version < 7.0", - "https://www.elastic.co/guide/en/elasticsearch/reference/master/" + "breaking-changes-8.0.html", + "Old index with a compatibility version < 8.0", + "https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-9.0.html", "This index has version: " + createdWith.toReleaseVersion(), false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); assertEquals(singletonList(expected), issues); } + public void testOldIndicesCheckDataStreamIndex() { + IndexVersion createdWith = IndexVersion.fromId(7170099); + IndexMetadata indexMetadata = IndexMetadata.builder(".ds-test") + .settings(settings(createdWith).put("index.hidden", true)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + DataStream dataStream = new DataStream( + randomAlphaOfLength(10), + List.of(indexMetadata.getIndex()), + randomNegativeLong(), + Map.of(), + randomBoolean(), + false, + false, + randomBoolean(), + randomFrom(IndexMode.values()), + null, + randomFrom(DataStreamOptions.EMPTY, DataStreamOptions.FAILURE_STORE_DISABLED, DataStreamOptions.FAILURE_STORE_ENABLED, null), + List.of(), + randomBoolean(), + null + ); + ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) + .metadata( + Metadata.builder() + .put(indexMetadata, true) + .customs( + Map.of( + DataStreamMetadata.TYPE, + new DataStreamMetadata( + ImmutableOpenMap.builder(Map.of("my-data-stream", dataStream)).build(), + ImmutableOpenMap.of() + ) + ) + ) + ) + .build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata, clusterState)); + assertThat(issues.size(), equalTo(0)); + } + public void testTranslogRetentionSettings() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -81,7 +138,10 @@ public void testDefaultTranslogRetentionSettings() { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); } IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> 
c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat(issues, empty()); } @@ -89,7 +149,10 @@ public void testIndexDataPathSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexMetadata.INDEX_DATA_PATH_SETTING.getKey(), createTempDir()); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); final String expectedUrl = "https://www.elastic.co/guide/en/elasticsearch/reference/7.13/breaking-changes-7.13.html#deprecate-shared-data-path-setting"; assertThat( @@ -111,7 +174,10 @@ public void testSimpleFSSetting() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs"); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -133,7 +199,10 @@ public void testFrozenIndex() { Settings.Builder settings = settings(IndexVersion.current()); settings.put(FrozenEngine.INDEX_FROZEN.getKey(), true); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetadata)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(indexMetadata, ClusterState.EMPTY_STATE) + ); assertThat( issues, contains( @@ -175,7 +244,10 @@ public void testCamelCaseDeprecation() throws IOException { false, null ); - List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(simpleIndex)); + List issues = DeprecationChecks.filterChecks( + INDEX_SETTINGS_CHECKS, + c -> c.apply(simpleIndex, ClusterState.EMPTY_STATE) + ); assertThat(issues, hasItem(expected)); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 9039177e0643d..9c173795d0ab1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -51,6 +51,7 @@ import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.FoldablesConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -1226,6 +1227,16 @@ private Expression resolveConvertFunction(AbstractConvertFunction convert, 
List< if (convert.field() instanceof FieldAttribute fa && fa.field() instanceof InvalidMappedField imf) { HashMap typeResolutions = new HashMap<>(); Set supportedTypes = convert.supportedTypes(); + if (convert instanceof FoldablesConvertFunction fcf) { + // FoldablesConvertFunction does not accept fields as inputs, they only accept constants + String unresolvedMessage = "argument of [" + + fcf.sourceText() + + "] must be a constant, received [" + + Expressions.name(fa) + + "]"; + Expression ua = new UnresolvedAttribute(fa.source(), fa.name(), unresolvedMessage); + return fcf.replaceChildren(Collections.singletonList(ua)); + } imf.types().forEach(type -> { if (supportedTypes.contains(type.widenSmallNumeric())) { TypeResolutionKey key = new TypeResolutionKey(fa.name(), type); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java index 6e2b5bb63532d..8f43a6481db07 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FoldablesConvertFunction.java @@ -59,7 +59,8 @@ protected final TypeResolution resolveType() { @Override protected final Map factories() { - // TODO if a union type field is provided as an input, the correct error message is not shown, #112668 is a follow up + // This is used by ResolveUnionTypes, which is expected to be applied to ES fields only + // FoldablesConvertFunction takes only constants as inputs, so this is empty return Map.of(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index d6cda4a3a9ff7..0a34d6cd848bb 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -244,6 +244,34 @@ public void testUnsupportedAndMultiTypedFields() { + " [ip] in [test1, test2, test3] and [2] other indices, [keyword] in [test6]", error("from test* | where multi_typed is not null", analyzer) ); + + for (String functionName : List.of("to_timeduration", "to_dateperiod")) { + String lineNumber = functionName.equalsIgnoreCase("to_timeduration") ? "47" : "45"; + String errorType = functionName.equalsIgnoreCase("to_timeduration") ? 
"time_duration" : "date_period"; + assertEquals( + "1:" + lineNumber + ": Cannot use field [unsupported] with unsupported type [flattened]", + error("from test* | eval x = now() + " + functionName + "(unsupported)", analyzer) + ); + assertEquals( + "1:" + lineNumber + ": argument of [" + functionName + "(multi_typed)] must be a constant, received [multi_typed]", + error("from test* | eval x = now() + " + functionName + "(multi_typed)", analyzer) + ); + assertThat( + error("from test* | eval x = unsupported, y = now() + " + functionName + "(x)", analyzer), + containsString("1:23: Cannot use field [unsupported] with unsupported type [flattened]") + ); + assertThat( + error("from test* | eval x = multi_typed, y = now() + " + functionName + "(x)", analyzer), + containsString( + "1:48: argument of [" + + functionName + + "(x)] must be [" + + errorType + + " or string], " + + "found value [x] type [unsupported]" + ) + ); + } } public void testRoundFunctionInvalidInputs() { diff --git a/x-pack/plugin/fleet/qa/rest/build.gradle b/x-pack/plugin/fleet/qa/rest/build.gradle index fda9251c7ef34..dec624bc3cc56 100644 --- a/x-pack/plugin/fleet/qa/rest/build.gradle +++ b/x-pack/plugin/fleet/qa/rest/build.gradle @@ -1,8 +1,15 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ -dependencies { - yamlRestTestImplementation(testArtifact(project(xpackModule('core')))) -} +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-test-artifact' restResources { restApi { @@ -10,11 +17,17 @@ restResources { } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - extraConfigFile 'roles.yml', file('roles.yml') - user username: 'elastic_admin', password: 'admin-password' - user username: 'fleet_unprivileged_secrets', password: 'password', role: 'unprivileged_secrets' +artifacts { + restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} + +tasks.named('yamlRestTest') { + usesDefaultDistribution() +} +tasks.named('yamlRestCompatTest') { + usesDefaultDistribution() +} +if (BuildParams.inFipsJvm){ + // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC + tasks.named("yamlRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java index 202149abf11e1..bc49649bc1139 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/fleet/FleetRestIT.java @@ -12,8 +12,12 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; 
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class FleetRestIT extends ESClientYamlSuiteTestCase { @@ -21,14 +25,30 @@ public FleetRestIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); } + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.security.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("elastic_admin", "admin-password", "superuser", true) + .user("fleet_unprivileged_secrets", "password", "unprivileged_secrets", true) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { - String authentication = basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray())); - return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", authentication).build(); + String token = basicAuthHeaderValue("elastic_admin", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } @ParametersFactory public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + } diff --git a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml index 5610502a65d23..4c168c8feb0cd 100644 --- a/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml +++ b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/fleet/20_wait_for_checkpoints.yml @@ -105,6 +105,7 @@ setup: index: "test-after-refresh" allow_partial_search_results: false wait_for_checkpoints: 2 + wait_for_checkpoints_timeout: 1m body: { query: { match_all: {} } } --- @@ -115,7 +116,7 @@ setup: body: - { "allow_partial_search_results": false, wait_for_checkpoints: 1 } - { query: { match_all: { } } } - - { "allow_partial_search_results": false, wait_for_checkpoints: 2 } + - { "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m } - { query: { match_all: { } } } - match: { responses.0._shards.successful: 1 } @@ -128,7 +129,7 @@ setup: - {query: { match_all: {} } } - { "index": "test-alias", "allow_partial_search_results": false, wait_for_checkpoints: 1 } - { query: { match_all: { } } } - - {"index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2} + - { "index": "test-refresh-disabled", "allow_partial_search_results": false, wait_for_checkpoints: 2, wait_for_checkpoints_timeout: 1m } - {query: { match_all: {} } } - match: { responses.0._shards.successful: 1 } diff --git a/x-pack/plugin/fleet/qa/rest/roles.yml b/x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml similarity index 100% rename from x-pack/plugin/fleet/qa/rest/roles.yml rename to x-pack/plugin/fleet/qa/rest/src/yamlRestTest/resources/roles.yml diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 83249266c79ab..fe83acc8574aa 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -68,6 +68,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Stream; @@ -680,25 +681,13 @@ public void chunkedInfer( esModel.getConfigurations().getChunkingSettings() ).batchRequestsWithListeners(listener); - for (var batch : batchedRequests) { - var inferenceRequest = buildInferenceRequest( - esModel.mlNodeDeploymentId(), - EmptyConfigUpdate.INSTANCE, - batch.batch().inputs(), - inputType, - timeout - ); - - ActionListener mlResultsListener = batch.listener() - .delegateFailureAndWrap( - (l, inferenceResult) -> translateToChunkedResult(model.getTaskType(), inferenceResult.getInferenceResults(), l) - ); - - var maybeDeployListener = mlResultsListener.delegateResponse( - (l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, mlResultsListener) - ); - - client.execute(InferModelAction.INSTANCE, inferenceRequest, maybeDeployListener); + if (batchedRequests.isEmpty()) { + listener.onResponse(List.of()); + } else { + // Avoid filling the inference queue by executing the batches in series + // Each batch contains up to EMBEDDING_MAX_BATCH_SIZE inference requests + var sequentialRunner = new BatchIterator(esModel, inputType, timeout, batchedRequests); + sequentialRunner.run(); } } else { listener.onFailure(notElasticsearchModelException(model)); } @@ -1018,6 +1007,82 @@ static TaskType inferenceConfigToTaskType(InferenceConfig config) { } } + /** + * Iterates over the batch executing a limited number of requests at a time to avoid + * filling the ML node inference queue. + * + * First, a single request is executed, which can also trigger deploying a model + * if necessary. When this request is successfully executed, a callback executes + * N requests in parallel next. Each of these requests also has a callback that + * executes one more request, so that at all times N requests are in-flight. This + * continues until all requests are executed. + */ + class BatchIterator { + private static final int NUM_REQUESTS_INFLIGHT = 20; // * batch size = 200 + + private final AtomicInteger index = new AtomicInteger(); + private final ElasticsearchInternalModel esModel; + private final List requestAndListeners; + private final InputType inputType; + private final TimeValue timeout; + + BatchIterator( + ElasticsearchInternalModel esModel, + InputType inputType, + TimeValue timeout, + List requestAndListeners + ) { + this.esModel = esModel; + this.requestAndListeners = requestAndListeners; + this.inputType = inputType; + this.timeout = timeout; + } + + void run() { + // The first request may deploy the model, and upon completion runs + // NUM_REQUESTS_INFLIGHT in parallel. 
+ inferenceExecutor.execute(() -> inferBatch(NUM_REQUESTS_INFLIGHT, true)); + } + + private void inferBatch(int runAfterCount, boolean maybeDeploy) { + int batchIndex = index.getAndIncrement(); + if (batchIndex >= requestAndListeners.size()) { + return; + } + executeRequest(batchIndex, maybeDeploy, () -> { + for (int i = 0; i < runAfterCount; i++) { + // Subsequent requests may not deploy the model, because the first request + // already did so. Upon completion, it runs one more request. + inferenceExecutor.execute(() -> inferBatch(1, false)); + } + }); + } + + private void executeRequest(int batchIndex, boolean maybeDeploy, Runnable runAfter) { + EmbeddingRequestChunker.BatchRequestAndListener batch = requestAndListeners.get(batchIndex); + var inferenceRequest = buildInferenceRequest( + esModel.mlNodeDeploymentId(), + EmptyConfigUpdate.INSTANCE, + batch.batch().inputs(), + inputType, + timeout + ); + logger.trace("Executing batch index={}", batchIndex); + + ActionListener listener = batch.listener() + .delegateFailureAndWrap( + (l, inferenceResult) -> translateToChunkedResult(esModel.getTaskType(), inferenceResult.getInferenceResults(), l) + ); + if (runAfter != null) { + listener = ActionListener.runAfter(listener, runAfter); + } + if (maybeDeploy) { + listener = listener.delegateResponse((l, exception) -> maybeStartDeployment(esModel, exception, inferenceRequest, l)); + } + client.execute(InferModelAction.INSTANCE, inferenceRequest, listener); + } + } + public static class Configuration { public static InferenceServiceConfiguration get() { return configuration.getOrCompute(); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java index c1be537a6b0a7..4fdf254101d3e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/EmbeddingRequestChunkerTests.java @@ -24,12 +24,25 @@ import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.startsWith; public class EmbeddingRequestChunkerTests extends ESTestCase { + public void testEmptyInput() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, empty()); + } + + public void testBlankInput() { + var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); + var batches = new EmbeddingRequestChunker(List.of(""), 100, 100, 10, embeddingType).batchRequestsWithListeners(testListener()); + assertThat(batches, hasSize(1)); + } + public void testShortInputsAreSingleBatch() { String input = "one chunk"; var embeddingType = randomFrom(EmbeddingRequestChunker.EmbeddingType.values()); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index 89a27a921cbea..9a4d0dda82238 100644 --- 
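The BatchIterator above bounds concurrency without a semaphore: the first request runs alone (it may also deploy the model), its completion fans out NUM_REQUESTS_INFLIGHT requests, and each completed request schedules exactly one successor. A generic, synchronous sketch of that scheduling shape, using plain Runnables in place of the asynchronous inference requests (the real code chains the successor through ActionListener.runAfter instead of a finally block):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;

class BoundedBatchRunner {
    private final AtomicInteger index = new AtomicInteger();
    private final List<Runnable> batches;
    private final ExecutorService executor;
    private final int maxInFlight;

    BoundedBatchRunner(List<Runnable> batches, ExecutorService executor, int maxInFlight) {
        this.batches = batches;
        this.executor = executor;
        this.maxInFlight = maxInFlight;
    }

    void run() {
        // The first task runs alone; it may perform one-time setup (the real
        // code can deploy the model here). Its completion fans out maxInFlight tasks.
        executor.execute(() -> runBatch(maxInFlight));
    }

    private void runBatch(int successors) {
        int i = index.getAndIncrement();
        if (i >= batches.size()) {
            return; // all batches claimed; extra workers exit quietly
        }
        try {
            batches.get(i).run();
        } finally {
            // Every finished task schedules its successors, so the number of
            // outstanding tasks never grows beyond maxInFlight.
            for (int j = 0; j < successors; j++) {
                executor.execute(() -> runBatch(1));
            }
        }
    }
}

With new BoundedBatchRunner(batches, executor, 20).run(), at steady state at most 20 tasks are queued or running at once, no matter how many batches a large document produces.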
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; @@ -65,6 +66,7 @@ import org.elasticsearch.xpack.inference.InferencePlugin; import org.elasticsearch.xpack.inference.chunking.ChunkingSettingsTests; import org.elasticsearch.xpack.inference.chunking.EmbeddingRequestChunker; +import org.elasticsearch.xpack.inference.chunking.WordBoundaryChunkingSettings; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.junit.After; import org.junit.Before; @@ -72,12 +74,14 @@ import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -832,16 +836,16 @@ public void testParsePersistedConfig() { } } - public void testChunkInfer_E5WithNullChunkingSettings() { + public void testChunkInfer_E5WithNullChunkingSettings() throws InterruptedException { testChunkInfer_e5(null); } - public void testChunkInfer_E5ChunkingSettingsSet() { + public void testChunkInfer_E5ChunkingSettingsSet() throws InterruptedException { testChunkInfer_e5(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { + private void testChunkInfer_e5(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -889,6 +893,9 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -897,22 +904,23 @@ private void testChunkInfer_e5(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_SparseWithNullChunkingSettings() { + public void testChunkInfer_SparseWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Sparse(null); } - public void testChunkInfer_SparseWithChunkingSettingsSet() { + public void testChunkInfer_SparseWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Sparse(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { + private void 
testChunkInfer_Sparse(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -936,6 +944,7 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { var service = createService(client); var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { assertThat(chunkedResponse, hasSize(2)); assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedSparseEmbeddingResults.class)); @@ -955,6 +964,9 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -963,22 +975,23 @@ private void testChunkInfer_Sparse(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } - public void testChunkInfer_ElserWithNullChunkingSettings() { + public void testChunkInfer_ElserWithNullChunkingSettings() throws InterruptedException { testChunkInfer_Elser(null); } - public void testChunkInfer_ElserWithChunkingSettingsSet() { + public void testChunkInfer_ElserWithChunkingSettingsSet() throws InterruptedException { testChunkInfer_Elser(ChunkingSettingsTests.createRandomChunkingSettings()); } @SuppressWarnings("unchecked") - private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { + private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); mlTrainedModelResults.add(TextExpansionResultsTests.createRandomResults()); @@ -1022,6 +1035,9 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1030,9 +1046,10 @@ private void testChunkInfer_Elser(ChunkingSettings chunkingSettings) { InputType.SEARCH, new ChunkingOptions(null, null), InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } @@ -1093,7 +1110,7 @@ public void testChunkInferSetsTokenization() { } @SuppressWarnings("unchecked") - public void testChunkInfer_FailsBatch() { + public void testChunkInfer_FailsBatch() throws InterruptedException { var mlTrainedModelResults = new ArrayList(); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); @@ -1129,6 +1146,9 @@ public void testChunkInfer_FailsBatch() { gotResults.set(true); }, ESTestCase::fail); + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + service.chunkedInfer( model, null, @@ -1137,12 +1157,86 @@ public void testChunkInfer_FailsBatch() { InputType.SEARCH, new ChunkingOptions(null, null), 
InferenceAction.Request.DEFAULT_TIMEOUT, - ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) + latchedListener ); + latch.await(); assertTrue("Listener not called", gotResults.get()); } + @SuppressWarnings("unchecked") + public void testChunkingLargeDocument() throws InterruptedException { + int numBatches = randomIntBetween(3, 6); + + // how many response objects to return in each batch + int[] numResponsesPerBatch = new int[numBatches]; + for (int i = 0; i < numBatches - 1; i++) { + numResponsesPerBatch[i] = ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE; + } + numResponsesPerBatch[numBatches - 1] = randomIntBetween(1, ElasticsearchInternalService.EMBEDDING_MAX_BATCH_SIZE); + int numChunks = Arrays.stream(numResponsesPerBatch).sum(); + + // build a doc with enough words to make numChunks of chunks + int wordsPerChunk = 10; + int numWords = numChunks * wordsPerChunk; + var input = "word ".repeat(numWords); + + Client client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + + // mock the inference response + doAnswer(invocationOnMock -> { + var request = (InferModelAction.Request) invocationOnMock.getArguments()[1]; + var listener = (ActionListener) invocationOnMock.getArguments()[2]; + var mlTrainedModelResults = new ArrayList(); + for (int i = 0; i < request.numberOfDocuments(); i++) { + mlTrainedModelResults.add(MlTextEmbeddingResultsTests.createRandomResults()); + } + var response = new InferModelAction.Response(mlTrainedModelResults, "foo", true); + listener.onResponse(response); + return null; + }).when(client).execute(same(InferModelAction.INSTANCE), any(InferModelAction.Request.class), any(ActionListener.class)); + + var service = createService(client); + + var gotResults = new AtomicBoolean(); + var resultsListener = ActionListener.>wrap(chunkedResponse -> { + assertThat(chunkedResponse, hasSize(1)); + assertThat(chunkedResponse.get(0), instanceOf(InferenceChunkedTextEmbeddingFloatResults.class)); + var sparseResults = (InferenceChunkedTextEmbeddingFloatResults) chunkedResponse.get(0); + assertThat(sparseResults.chunks(), hasSize(numChunks)); + + gotResults.set(true); + }, ESTestCase::fail); + + // Create model using the word boundary chunker. 
+ var model = new MultilingualE5SmallModel( + "foo", + TaskType.TEXT_EMBEDDING, + "e5", + new MultilingualE5SmallInternalServiceSettings(1, 1, "cross-platform", null), + new WordBoundaryChunkingSettings(wordsPerChunk, 0) + ); + + var latch = new CountDownLatch(1); + var latchedListener = new LatchedActionListener<>(resultsListener, latch); + + // For the given input we know how many requests will be made + service.chunkedInfer( + model, + null, + List.of(input), + Map.of(), + InputType.SEARCH, + new ChunkingOptions(null, null), + InferenceAction.Request.DEFAULT_TIMEOUT, + latchedListener + ); + + latch.await(); + assertTrue("Listener not called with results", gotResults.get()); + } + public void testParsePersistedConfig_Rerank() { // with task settings { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java index bab7bb52f928f..5a06308a3c8cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingCapacity.java @@ -17,7 +17,11 @@ public static Builder builder(ByteSizeValue nodeSize, ByteSizeValue tierSize) { } public static Builder from(AutoscalingCapacity autoscalingCapacity) { - return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + if (autoscalingCapacity == null) { + return builder(null, null); + } else { + return builder(autoscalingCapacity.node().memory(), autoscalingCapacity.total().memory()); + } } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index dfe0e557f749d..0ff6aece95ab1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -809,7 +809,7 @@ static MlMemoryAutoscalingCapacity ensureScaleDown( MlMemoryAutoscalingCapacity scaleDownResult, MlMemoryAutoscalingCapacity currentCapacity ) { - if (scaleDownResult == null || currentCapacity == null) { + if (scaleDownResult == null || currentCapacity == null || currentCapacity.isUndetermined()) { return null; } MlMemoryAutoscalingCapacity newCapacity = MlMemoryAutoscalingCapacity.builder( diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java new file mode 100644 index 0000000000000..b6fc43e2a6e48 --- /dev/null +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/CrossClusterEsqlRCS1UnavailableRemotesIT.java @@ -0,0 +1,286 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
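One pattern runs through all the inference test changes above: ActionListener.runAfter(resultsListener, () -> terminate(threadPool)) is replaced by a LatchedActionListener, so the test thread blocks on latch.await() until the listener has actually fired, which matters now that the batches complete asynchronously across several executor hops. A minimal sketch of the pattern, with the asynchronous call simulated by a plain thread rather than the real inference service:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

class LatchedListenerExample {
    public static void main(String[] args) throws InterruptedException {
        var gotResults = new AtomicBoolean();
        ActionListener<String> resultsListener = ActionListener.wrap(
            response -> gotResults.set(true),
            e -> { throw new AssertionError(e); }
        );

        // The latch counts down after the wrapped listener runs, releasing the test thread.
        var latch = new CountDownLatch(1);
        var latchedListener = new LatchedActionListener<>(resultsListener, latch);

        // Stand-in for service.chunkedInfer(...): responds on another thread.
        new Thread(() -> latchedListener.onResponse("chunked results")).start();

        latch.await();
        if (gotResults.get() == false) {
            throw new AssertionError("Listener not called");
        }
    }
}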
+ */ + +package org.elasticsearch.xpack.remotecluster; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.greaterThan; + +public class CrossClusterEsqlRCS1UnavailableRemotesIT extends AbstractRemoteClusterSecurityTestCase { + private static final AtomicBoolean SSL_ENABLED_REF = new AtomicBoolean(); + + static { + fulfillingCluster = ElasticsearchCluster.local() + .name("fulfilling-cluster") + .nodes(1) + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("remote_cluster.port", "0") + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_server.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key") + .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt") + .setting("xpack.security.authc.token.enabled", "true") + .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password") + .node(0, spec -> spec.setting("remote_cluster_server.enabled", "true")) + .build(); + + queryCluster = ElasticsearchCluster.local() + .name("query-cluster") + .module("x-pack-esql") + .module("x-pack-enrich") + .apply(commonClusterConfig) + .setting("xpack.ml.enabled", "false") + .setting("xpack.security.remote_cluster_client.ssl.enabled", () -> String.valueOf(SSL_ENABLED_REF.get())) + .build(); + } + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(fulfillingCluster).around(queryCluster); + + @Before + public void setupPreRequisites() throws IOException { + setupRolesAndPrivileges(); + loadData(); + } + + public void testEsqlRcs1UnavailableRemoteScenarios() throws Exception { + clusterShutDownWithRandomSkipUnavailable(); + remoteClusterShutdownWithSkipUnavailableTrue(); + remoteClusterShutdownWithSkipUnavailableFalse(); + } + + private void clusterShutDownWithRandomSkipUnavailable() throws Exception { + // skip_unavailable is set to a random boolean value. + // However, no clusters are stopped. Hence, we do not expect any other behaviour + // other than a 200-OK. 
+ + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), randomBoolean()); + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + assertThat(columns.size(), is(4)); + assertThat(values.size(), is(9)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(2)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("skipped"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("successful")); + } + + @SuppressWarnings("unchecked") + private void remoteClusterShutdownWithSkipUnavailableTrue() throws Exception { + // Remote cluster is stopped and skip unavailable is set to true. + // We expect no exception and partial results from the remaining open cluster. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), true); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + Response response = client().performRequest(esqlRequest(query)); + + Map map = responseAsMap(response); + ArrayList columns = (ArrayList) map.get("columns"); + ArrayList values = (ArrayList) map.get("values"); + Map clusters = (Map) map.get("_clusters"); + Map clusterDetails = (Map) clusters.get("details"); + Map localClusterDetails = (Map) clusterDetails.get("(local)"); + Map remoteClusterDetails = (Map) clusterDetails.get("my_remote_cluster"); + + // Assert results obtained from the local cluster and that remote cluster was + // skipped. 
+ assertOK(response); + assertThat((int) map.get("took"), greaterThan(0)); + + assertThat(columns.size(), is(2)); + assertThat(values.size(), is(5)); + + assertThat((int) clusters.get("total"), is(2)); + assertThat((int) clusters.get("successful"), is(1)); + assertThat((int) clusters.get("skipped"), is(1)); + assertThat((int) clusters.get("running"), is(0)); + assertThat((int) clusters.get("partial"), is(0)); + assertThat((int) clusters.get("failed"), is(0)); + + assertThat(clusterDetails.size(), is(2)); + assertThat((int) localClusterDetails.get("took"), greaterThan(0)); + assertThat(localClusterDetails.get("status"), is("successful")); + + assertThat((int) remoteClusterDetails.get("took"), greaterThan(0)); + assertThat(remoteClusterDetails.get("status"), is("skipped")); + + } catch (ResponseException r) { + throw new AssertionError(r); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void remoteClusterShutdownWithSkipUnavailableFalse() throws Exception { + // Remote cluster is stopped and skip_unavailable is set to false. + // Although the other cluster is open, we expect an Exception. + + configureRemoteCluster("my_remote_cluster", fulfillingCluster, true, randomBoolean(), false); + + try { + // Stop remote cluster. + fulfillingCluster.stop(true); + + // A simple query that targets our remote cluster. + String query = "FROM *,my_remote_cluster:* | LIMIT 10"; + ResponseException ex = expectThrows(ResponseException.class, () -> client().performRequest(esqlRequest(query))); + assertThat(ex.getMessage(), containsString("connect_transport_exception")); + } finally { + fulfillingCluster.start(); + closeFulfillingClusterClient(); + initFulfillingClusterClient(); + } + } + + private void setupRolesAndPrivileges() throws IOException { + var putUserRequest = new Request("PUT", "/_security/user/" + REMOTE_SEARCH_USER); + putUserRequest.setJsonEntity(""" + { + "password": "x-pack-test-password", + "roles" : ["remote_search"] + }"""); + assertOK(adminClient().performRequest(putUserRequest)); + + var putRoleOnRemoteClusterRequest = new Request("PUT", "/_security/role/" + REMOTE_SEARCH_ROLE); + putRoleOnRemoteClusterRequest.setJsonEntity(""" + { + "indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"] + } + ], + "remote_indices": [ + { + "names": ["points", "squares"], + "privileges": ["read", "read_cross_cluster", "create_index", "monitor"], + "clusters": ["my_remote_cluster"] + } + ] + }"""); + assertOK(adminClient().performRequest(putRoleOnRemoteClusterRequest)); + } + + private void loadData() throws IOException { + Request createIndex = new Request("PUT", "points"); + createIndex.setJsonEntity(""" + { + "mappings": { + "properties": { + "id": { "type": "integer" }, + "score": { "type": "integer" } + } + } + } + """); + assertOK(client().performRequest(createIndex)); + + Request bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": { "_index": "points" } } + { "id": 1, "score": 75} + { "index": { "_index": "points" } } + { "id": 2, "score": 125} + { "index": { "_index": "points" } } + { "id": 3, "score": 100} + { "index": { "_index": "points" } } + { "id": 4, "score": 50} + { "index": { "_index": "points" } } + { "id": 5, "score": 150} + """); + assertOK(client().performRequest(bulkRequest)); + + createIndex = new Request("PUT", "squares"); + createIndex.setJsonEntity(""" + { + "mappings": { + 
"properties": { + "num": { "type": "integer" }, + "square": { "type": "integer" } + } + } + } + """); + assertOK(performRequestAgainstFulfillingCluster(createIndex)); + + bulkRequest = new Request("POST", "/_bulk?refresh=true"); + bulkRequest.setJsonEntity(""" + { "index": {"_index": "squares"}} + { "num": 1, "square": 1 } + { "index": {"_index": "squares"}} + { "num": 2, "square": 4 } + { "index": {"_index": "squares"}} + { "num": 3, "square": 9 } + { "index": {"_index": "squares"}} + { "num": 4, "square": 16 } + """); + assertOK(performRequestAgainstFulfillingCluster(bulkRequest)); + } + + private Request esqlRequest(String query) throws IOException { + XContentBuilder body = JsonXContent.contentBuilder(); + + body.startObject(); + body.field("query", query); + body.field("include_ccs_metadata", true); + body.endObject(); + + Request request = new Request("POST", "_query"); + request.setJsonEntity(Strings.toString(body)); + + return request; + } +}