diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 7a6e5ac125aff..0883097e75aad 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.14.0 -lucene = 9.9.2 +lucene = 9.10.0 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.2+13@f2283984656d49d69e91c558476027ac diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index bf539efaf3c30..54962ac241f75 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -433,7 +433,7 @@ private void commonNodeConfig() { if (node.getTestDistribution().equals(TestDistribution.INTEG_TEST)) { node.defaultConfig.put("xpack.security.enabled", "false"); } else { - if (node.getVersion().onOrAfter("7.16.0")) { + if (hasDeprecationIndexing(node)) { node.defaultConfig.put("cluster.deprecation_indexing.enabled", "false"); } } @@ -474,13 +474,17 @@ public void nextNodeToNextVersion() { commonNodeConfig(); nodeIndex += 1; if (node.getTestDistribution().equals(TestDistribution.DEFAULT)) { - if (node.getVersion().onOrAfter("7.16.0")) { + if (hasDeprecationIndexing(node)) { node.setting("cluster.deprecation_indexing.enabled", "false"); } } node.start(); } + private static boolean hasDeprecationIndexing(ElasticsearchNode node) { + return node.getVersion().onOrAfter("7.16.0") && node.getSettingKeys().contains("stateless.enabled") == false; + } + @Override public void extraConfigFile(String destination, File from) { nodes.all(node -> node.extraConfigFile(destination, from)); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 86862769c70e3..6e4ffa8885fbf 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.9.2 -:lucene_version_path: 9_9_2 +:lucene_version: 9.10.0 +:lucene_version_path: 9_10_0 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/105578.yaml b/docs/changelog/105578.yaml new file mode 100644 index 0000000000000..1ffa0128c1d0a --- /dev/null +++ b/docs/changelog/105578.yaml @@ -0,0 +1,13 @@ +pr: 105578 +summary: Upgrade to Lucene 9.10.0 +area: Search +type: enhancement +issues: [] +highlight: + title: New Lucene 9.10 release + body: |- + - https://github.com/apache/lucene/pull/13090: Prevent humongous allocations in ScalarQuantizer when building quantiles. + - https://github.com/apache/lucene/pull/12962: Speedup concurrent multi-segment HNSW graph search + - https://github.com/apache/lucene/pull/13033: Range queries on numeric/date/ip fields now exit earlier on segments whose values don't intersect with the query range. This should especially help when there are other required clauses in the `bool` query and when the range filter is narrow, e.g. filtering on the last 5 minutes. + - https://github.com/apache/lucene/pull/13026: `bool` queries that mix `filter` and `should` clauses will now propagate minimum competitive scores through the `should` clauses. This should yield speedups when sorting by descending score. 
+ notable: true diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 77572f707f4cb..7e0e53747834a 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -435,6 +435,8 @@ each node's <> at `$ES_TMPDIR/geoip-databases/>, <>) diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index dff7bb4a11ee4..5babb4def2320 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -70,6 +70,11 @@ The following tables lists all the defaults ISO formats supported: (separated by `T`), is optional. Examples: `yyyy-MM-dd'T'HH:mm:ss.SSSZ` or `yyyy-MM-dd`. + NOTE: When using `date_optional_time`, the parsing is lenient and will attempt to parse + numbers as a year (e.g. `292278994` will be parsed as a year). This can lead to unexpected results + when paired with a numeric focused format like `epoch_second` and `epoch_millis`. + It is recommended you use `strict_date_optional_time` when pairing with a numeric focused format. + [[strict-date-time-nanos]]`strict_date_optional_time_nanos`:: A generic ISO datetime parser, where the date must include the year at a minimum, and the time diff --git a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc index 249fddce9c416..b9bb36b21ea12 100644 --- a/docs/reference/search/search-your-data/semantic-search-inference.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-inference.asciidoc @@ -4,18 +4,21 @@ Semantic search with the {infer} API ++++ -The instructions in this tutorial shows you how to use the {infer} API with the -Open AI service to perform semantic search on your data. The following example -uses OpenAI's `text-embedding-ada-002` second generation embedding model. You -can use any OpenAI models, they are all supported by the {infer} API. +The instructions in this tutorial shows you how to use the {infer} API with +various services to perform semantic search on your data. The following examples +use Cohere's `embed-english-light-v3.0` model and OpenAI's +`text-embedding-ada-002` second generation embedding model. You can use any +Cohere and OpenAI models, they are all supported by the {infer} API. + +Click the name of the service you want to use on any of the widgets below to +review the corresponding instructions. [discrete] -[[infer-openai-requirements]] +[[infer-service-requirements]] ==== Requirements -An https://openai.com/[OpenAI account] is required to use the {infer} API with -the OpenAI service. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc[] [discrete] @@ -24,113 +27,30 @@ the OpenAI service. Create the {infer} task by using the <>: -[source,console] ------------------------------------------------------------- -PUT _inference/text_embedding/openai_embeddings <1> -{ - "service": "openai", - "service_settings": { - "api_key": "" <2> - }, - "task_settings": { - "model": "text-embedding-ada-002" <3> - } -} ------------------------------------------------------------- -// TEST[skip:TBD] -<1> The task type is `text_embedding` in the path. -<2> The API key of your OpenAI account. You can find your OpenAI API keys in -your OpenAI account under the -https://platform.openai.com/api-keys[API keys section]. You need to provide -your API key only once. 
The <> does not return your API -key. -<3> The name of the embedding model to use. You can find the list of OpenAI -embedding models -https://platform.openai.com/docs/guides/embeddings/embedding-models[here]. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-task-widget.asciidoc[] [discrete] -[[infer-openai-mappings]] +[[infer-service-mappings]] ==== Create the index mapping The mapping of the destination index - the index that contains the embeddings that the model will create based on your input text - must be created. The destination index must have a field with the <> -field type to index the output of the OpenAI model. +field type to index the output of the used model. -[source,console] --------------------------------------------------- -PUT openai-embeddings -{ - "mappings": { - "properties": { - "content_embedding": { <1> - "type": "dense_vector", <2> - "dims": 1536, <3> - "element_type": "float", - "similarity": "dot_product" <4> - }, - "content": { <5> - "type": "text" <6> - } - } - } -} --------------------------------------------------- -<1> The name of the field to contain the generated tokens. It must be refrenced -in the {infer} pipeline configuration in the next step. -<2> The field to contain the tokens is a `dense_vector` field. -<3> The output dimensions of the model. Find this value in the -https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI documentation] -of the model you use. -<4> The faster` dot_product` function can be used to calculate similarity -because OpenAI embeddings are normalised to unit length. You can check the -https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use[OpenAI docs] -about which similarity function to use. -<5> The name of the field from which to create the sparse vector representation. -In this example, the name of the field is `content`. It must be referenced in -the {infer} pipeline configuration in the next step. -<6> The field type which is text in this example. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc[] [discrete] -[[infer-openai-inference-ingest-pipeline]] +[[infer-service-inference-ingest-pipeline]] ==== Create an ingest pipeline with an inference processor Create an <> with an -<> and use the OpenAI model you created -above to infer against the data that is being ingested in the -pipeline. +<> and use the model you created above to +infer against the data that is being ingested in the pipeline. -[source,console] --------------------------------------------------- -PUT _ingest/pipeline/openai_embeddings -{ - "processors": [ - { - "inference": { - "model_id": "openai_embeddings", <1> - "input_output": { <2> - "input_field": "content", - "output_field": "content_embedding" - } - } - } - ] -} --------------------------------------------------- -<1> The name of the inference model you created by using the -<>. -<2> Configuration object that defines the `input_field` for the {infer} process -and the `output_field` that will contain the {infer} results. - -//// -[source,console] ----- -DELETE _ingest/pipeline/openai_embeddings ----- -// TEST[continued] -//// +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc[] [discrete] @@ -157,32 +77,10 @@ you can see an index named `test-data` with 182469 documents. 
[[reindexing-data-infer]] ==== Ingest the data through the {infer} ingest pipeline -Create the embeddings from the text by reindexing the data throught the {infer} -pipeline that uses the OpenAI model as the inference model. +Create the embeddings from the text by reindexing the data through the {infer} +pipeline that uses the chosen model as the inference model. -[source,console] ----- -POST _reindex?wait_for_completion=false -{ - "source": { - "index": "test-data", - "size": 50 <1> - }, - "dest": { - "index": "openai-embeddings", - "pipeline": "openai_embeddings" - } -} ----- -// TEST[skip:TBD] -<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller -number makes the update of the reindexing process quicker which enables you to -follow the progress closely and detect errors early. - -NOTE: The -https://platform.openai.com/account/limits[rate limit of your OpenAI account] -may affect the throughput of the reindexing process. If this happens, change -`size` to `3` or a similar value in magnitude. +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc[] The call returns a task ID to monitor the progress: @@ -214,63 +112,4 @@ provide the query text and the model you have used to create the embeddings. NOTE: If you cancelled the reindexing process, you run the query only a part of the data which affects the quality of your results. -[source,console] --------------------------------------------------- -GET openai-embeddings/_search -{ - "knn": { - "field": "content_embedding", - "query_vector_builder": { - "text_embedding": { - "model_id": "openai_embeddings", - "model_text": "Calculate fuel cost" - } - }, - "k": 10, - "num_candidates": 100 - }, - "_source": [ - "id", - "content" - ] -} --------------------------------------------------- -// TEST[skip:TBD] - -As a result, you receive the top 10 documents that are closest in meaning to the -query from the `openai-embeddings` index sorted by their proximity to the query: - -[source,consol-result] --------------------------------------------------- -"hits": [ - { - "_index": "openai-embeddings", - "_id": "DDd5OowBHxQKHyc3TDSC", - "_score": 0.83704096, - "_source": { - "id": 862114, - "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." - } - }, - { - "_index": "openai-embeddings", - "_id": "ajd5OowBHxQKHyc3TDSC", - "_score": 0.8345704, - "_source": { - "id": 820622, - "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. 
This calculator can help you estimate the cost of fuel for different heating appliances." - } - }, - { - "_index": "openai-embeddings", - "_id": "Djd5OowBHxQKHyc3TDSC", - "_score": 0.8327426, - "_source": { - "id": 8202683, - "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." - } - }, - (...) - ] --------------------------------------------------- -// NOTCONSOLE +include::{es-repo-dir}/tab-widgets/inference-api/infer-api-search-widget.asciidoc[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc new file mode 100644 index 0000000000000..44d2f60966caa --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-ingest-pipeline.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc new file mode 100644 index 0000000000000..a5a1910e8f8ef --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc @@ -0,0 +1,63 @@ +//// + +[source,console] +---- +DELETE _ingest/pipeline/*_embeddings +---- +// TEST +// TEARDOWN + +//// + +// tag::cohere[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/cohere_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "cohere_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference configuration you created by using the +<>. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/openai_embeddings +{ + "processors": [ + { + "inference": { + "model_id": "openai_embeddings", <1> + "input_output": { <2> + "input_field": "content", + "output_field": "content_embedding" + } + } + } + ] +} +-------------------------------------------------- +<1> The name of the inference configuration you created by using the +<>. +<2> Configuration object that defines the `input_field` for the {infer} process +and the `output_field` that will contain the {infer} results. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc new file mode 100644 index 0000000000000..336c8052c282f --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-mapping.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc new file mode 100644 index 0000000000000..4b70a1b84f45f --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-mapping.asciidoc @@ -0,0 +1,71 @@ +// tag::cohere[] + +[source,console] +-------------------------------------------------- +PUT cohere-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 384, <3> + "element_type": "float" + }, + "content": { <4> + "type": "text" <5> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. Find this value in the +https://docs.cohere.com/reference/embed[Cohere documentation] of the model you +use. +<4> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<5> The field type which is text in this example. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +PUT openai-embeddings +{ + "mappings": { + "properties": { + "content_embedding": { <1> + "type": "dense_vector", <2> + "dims": 1536, <3> + "element_type": "float", + "similarity": "dot_product" <4> + }, + "content": { <5> + "type": "text" <6> + } + } + } +} +-------------------------------------------------- +<1> The name of the field to contain the generated tokens. It must be referenced +in the {infer} pipeline configuration in the next step. +<2> The field to contain the tokens is a `dense_vector` field. +<3> The output dimensions of the model. Find this value in the +https://platform.openai.com/docs/guides/embeddings/embedding-models[OpenAI documentation] +of the model you use. +<4> The faster `dot_product` function can be used to calculate similarity +because OpenAI embeddings are normalised to unit length. You can check the +https://platform.openai.com/docs/guides/embeddings/which-distance-function-should-i-use[OpenAI docs] +about which similarity function to use. +<5> The name of the field from which to create the dense vector representation. +In this example, the name of the field is `content`. It must be referenced in +the {infer} pipeline configuration in the next step. +<6> The field type which is text in this example. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc new file mode 100644 index 0000000000000..a73e4d7d76fc1 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-reindex.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc new file mode 100644 index 0000000000000..92e781f8b5a8a --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-reindex.asciidoc @@ -0,0 +1,55 @@ +// tag::cohere[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "cohere-embeddings", + "pipeline": "cohere_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +NOTE: The +https://dashboard.cohere.com/billing[rate limit of your Cohere account] +may affect the throughput of the reindexing process. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +---- +POST _reindex?wait_for_completion=false +{ + "source": { + "index": "test-data", + "size": 50 <1> + }, + "dest": { + "index": "openai-embeddings", + "pipeline": "openai_embeddings" + } +} +---- +// TEST[skip:TBD] +<1> The default batch size for reindexing is 1000. Reducing `size` to a smaller +number makes the update of the reindexing process quicker which enables you to +follow the progress closely and detect errors early. + +NOTE: The +https://platform.openai.com/account/limits[rate limit of your OpenAI account] +may affect the throughput of the reindexing process. If this happens, change +`size` to `3` or a similar value in magnitude. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc new file mode 100644 index 0000000000000..d1b981158c11b --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-requirements.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc new file mode 100644 index 0000000000000..f0bed750b69c9 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-requirements.asciidoc @@ -0,0 +1,14 @@ +// tag::cohere[] + +A https://cohere.com/[Cohere account] is required to use the {infer} API with +the Cohere service. + +// end::cohere[] + + +// tag::openai[] + +An https://openai.com/[OpenAI account] is required to use the {infer} API with +the OpenAI service. + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc new file mode 100644 index 0000000000000..4433f2da067f1 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-search-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-search.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc new file mode 100644 index 0000000000000..0c71ab7cecbce --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-search.asciidoc @@ -0,0 +1,139 @@ +// tag::cohere[] + +[source,console] +-------------------------------------------------- +GET cohere-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "cohere_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `cohere-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "cohere-embeddings", + "_id": "-eFWCY4BECzWLnMZuI78", + "_score": 0.737484, + "_source": { + "id": 1690948, + "content": "Oxygen is supplied to the muscles via red blood cells. Red blood cells carry hemoglobin which oxygen bonds with as the hemoglobin rich blood cells pass through the blood vessels of the lungs.The now oxygen rich blood cells carry that oxygen to the cells that are demanding it, in this case skeletal muscle cells.ther ways in which muscles are supplied with oxygen include: 1 Blood flow from the heart is increased. 2 Blood flow to your muscles in increased. 3 Blood flow from nonessential organs is transported to working muscles." + } + }, + { + "_index": "cohere-embeddings", + "_id": "HuFWCY4BECzWLnMZuI_8", + "_score": 0.7176013, + "_source": { + "id": 1692482, + "content": "The thoracic cavity is separated from the abdominal cavity by the diaphragm. This is a broad flat muscle. (muscular) diaphragm The diaphragm is a muscle that separat…e the thoracic from the abdominal cavity. The pelvis is the lowest part of the abdominal cavity and it has no physical separation from it Diaphragm." + } + }, + { + "_index": "cohere-embeddings", + "_id": "IOFWCY4BECzWLnMZuI_8", + "_score": 0.7154432, + "_source": { + "id": 1692489, + "content": "Muscular Wall Separating the Abdominal and Thoracic Cavities; Thoracic Cavity of a Fetal Pig; In Mammals the Diaphragm Separates the Abdominal Cavity from the" + } + }, + { + "_index": "cohere-embeddings", + "_id": "C-FWCY4BECzWLnMZuI_8", + "_score": 0.695313, + "_source": { + "id": 1691493, + "content": "Burning, aching, tenderness and stiffness are just some descriptors of the discomfort you may feel in the muscles you exercised one to two days ago.For the most part, these sensations you experience after exercise are collectively known as delayed onset muscle soreness.urning, aching, tenderness and stiffness are just some descriptors of the discomfort you may feel in the muscles you exercised one to two days ago." + } + }, + (...) 
+ ] +-------------------------------------------------- +// NOTCONSOLE + +// end::cohere[] + + +// tag::openai[] + +[source,console] +-------------------------------------------------- +GET openai-embeddings/_search +{ + "knn": { + "field": "content_embedding", + "query_vector_builder": { + "text_embedding": { + "model_id": "openai_embeddings", + "model_text": "Calculate fuel cost" + } + }, + "k": 10, + "num_candidates": 100 + }, + "_source": [ + "id", + "content" + ] +} +-------------------------------------------------- +// TEST[skip:TBD] + +As a result, you receive the top 10 documents that are closest in meaning to the +query from the `openai-embeddings` index sorted by their proximity to the query: + +[source,consol-result] +-------------------------------------------------- +"hits": [ + { + "_index": "openai-embeddings", + "_id": "DDd5OowBHxQKHyc3TDSC", + "_score": 0.83704096, + "_source": { + "id": 862114, + "body": "How to calculate fuel cost for a road trip. By Tara Baukus Mello • Bankrate.com. Dear Driving for Dollars, My family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost.It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes.y family is considering taking a long road trip to finish off the end of the summer, but I'm a little worried about gas prices and our overall fuel cost. It doesn't seem easy to calculate since we'll be traveling through many states and we are considering several routes." + } + }, + { + "_index": "openai-embeddings", + "_id": "ajd5OowBHxQKHyc3TDSC", + "_score": 0.8345704, + "_source": { + "id": 820622, + "body": "Home Heating Calculator. Typically, approximately 50% of the energy consumed in a home annually is for space heating. When deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important.This calculator can help you estimate the cost of fuel for different heating appliances.hen deciding on a heating system, many factors will come into play: cost of fuel, installation cost, convenience and life style are all important. This calculator can help you estimate the cost of fuel for different heating appliances." + } + }, + { + "_index": "openai-embeddings", + "_id": "Djd5OowBHxQKHyc3TDSC", + "_score": 0.8327426, + "_source": { + "id": 8202683, + "body": "Fuel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel.If you are paying $4 per gallon, the trip would cost you $200.Most boats have much larger gas tanks than cars.uel is another important cost. This cost will depend on your boat, how far you travel, and how fast you travel. A 33-foot sailboat traveling at 7 knots should be able to travel 300 miles on 50 gallons of diesel fuel." + } + }, + (...) + ] +-------------------------------------------------- +// NOTCONSOLE + +// end::openai[] \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc new file mode 100644 index 0000000000000..bc54bf6b14ddf --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-task-widget.asciidoc @@ -0,0 +1,39 @@ +++++ +
+
+ + +
+
+++++ + +include::infer-api-task.asciidoc[tag=cohere] + +++++ +
+ +
+++++ \ No newline at end of file diff --git a/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc new file mode 100644 index 0000000000000..3395fea9cc053 --- /dev/null +++ b/docs/reference/tab-widgets/inference-api/infer-api-task.asciidoc @@ -0,0 +1,56 @@ +// tag::cohere[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/cohere_embeddings <1> +{ + "service": "cohere", + "service_settings": { + "api_key": "", <2> + "model_id": "embed-english-light-v3.0", <3> + "embedding_type": "int8" + }, + "task_settings": { + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path. +<2> The API key of your Cohere account. You can find your API keys in your +Cohere dashboard under the +https://dashboard.cohere.com/api-keys[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The name of the embedding model to use. You can find the list of Cohere +embedding models https://docs.cohere.com/reference/embed[here]. + +// end::cohere[] + + +// tag::openai[] + +[source,console] +------------------------------------------------------------ +PUT _inference/text_embedding/openai_embeddings <1> +{ + "service": "openai", + "service_settings": { + "api_key": "", <2> + "model_id": "text-embedding-ada-002" <3> + }, + "task_settings": { + } +} +------------------------------------------------------------ +// TEST[skip:TBD] +<1> The task type is `text_embedding` in the path. +<2> The API key of your OpenAI account. You can find your OpenAI API keys in +your OpenAI account under the +https://platform.openai.com/api-keys[API keys section]. You need to provide +your API key only once. The <> does not return your API +key. +<3> The name of the embedding model to use. You can find the list of OpenAI +embedding models +https://platform.openai.com/docs/guides/embeddings/embedding-models[here]. 
+ +// end::openai[] \ No newline at end of file diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 648c7260256dd..a9d24d4d50b17 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -2653,124 +2653,124 @@ - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index e33b1fdcfa57a..b772e0bb347e2 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -164,7 +164,7 @@ public void testPutLifecycle() throws Exception { ).get(); assertThat(response.getDataStreamLifecycles().size(), equalTo(1)); assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream")); - assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getEffectiveDataRetention(), equalTo(dataRetention)); + assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getDataStreamRetention(), equalTo(dataRetention)); assertThat(response.getDataStreamLifecycles().get(0).lifecycle().isEnabled(), equalTo(true)); } @@ -189,7 +189,7 @@ public void testPutLifecycle() throws Exception { ).get(); assertThat(response.getDataStreamLifecycles().size(), equalTo(1)); assertThat(response.getDataStreamLifecycles().get(0).dataStreamName(), equalTo("my-data-stream")); - assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getEffectiveDataRetention(), equalTo(dataRetention)); + assertThat(response.getDataStreamLifecycles().get(0).lifecycle().getDataStreamRetention(), equalTo(dataRetention)); assertThat(response.getDataStreamLifecycles().get(0).lifecycle().isEnabled(), equalTo(false)); } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 471622489d9b2..a497eed121b0c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -118,7 +118,7 @@ public void testExplainLifecycle() throws Exception { assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); if (internalCluster().numDataNodes() > 1) { // If the number of nodes is 1 then the cluster will be yellow so forcemerge will report an error if it has run assertThat(explainIndex.getError(), nullValue()); @@ -175,7 +175,7 @@ public void testExplainLifecycle() throws Exception { 
assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); if (explainIndex.getIndex().equals(DataStream.getDefaultBackingIndexName(dataStreamName, 1))) { // first generation index was rolled over @@ -243,7 +243,7 @@ public void testExplainLifecycleForIndicesWithErrors() throws Exception { assertThat(explainIndex.isManagedByLifecycle(), is(true)); assertThat(explainIndex.getIndexCreationDate(), notNullValue()); assertThat(explainIndex.getLifecycle(), notNullValue()); - assertThat(explainIndex.getLifecycle().getEffectiveDataRetention(), nullValue()); + assertThat(explainIndex.getLifecycle().getDataStreamRetention(), nullValue()); assertThat(explainIndex.getRolloverDate(), nullValue()); assertThat(explainIndex.getTimeSinceRollover(System::currentTimeMillis), nullValue()); // index has not been rolled over yet diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 8b15d6a4b7bdf..1b875c28f7f43 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -822,38 +822,40 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { * @return The set of indices that delete requests have been sent for */ private Set maybeExecuteRetention(ClusterState state, DataStream dataStream, Set indicesToExcludeForRemainingRun) { - TimeValue retention = getRetentionConfiguration(dataStream); + Metadata metadata = state.metadata(); + List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); + if (backingIndicesOlderThanRetention.isEmpty()) { + return Set.of(); + } Set indicesToBeRemoved = new HashSet<>(); - if (retention != null) { - Metadata metadata = state.metadata(); - List backingIndicesOlderThanRetention = dataStream.getIndicesPastRetention(metadata::index, nowSupplier); - - for (Index index : backingIndicesOlderThanRetention) { - if (indicesToExcludeForRemainingRun.contains(index) == false) { - IndexMetadata backingIndex = metadata.index(index); - assert backingIndex != null : "the data stream backing indices must exist"; - - IndexMetadata.DownsampleTaskStatus downsampleStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndex.getSettings()); - // we don't want to delete the source index if they have an in-progress downsampling operation because the - // target downsample index will remain in the system as a standalone index - if (downsampleStatus.equals(UNKNOWN)) { - indicesToBeRemoved.add(index); - - // there's an opportunity here to batch the delete requests (i.e. 
delete 100 indices / request) - // let's start simple and reevaluate - String indexName = backingIndex.getIndex().getName(); - deleteIndexOnce(indexName, "the lapsed [" + retention + "] retention period"); - } else { - // there's an opportunity here to cancel downsampling and delete the source index now - logger.trace( - "Data stream lifecycle skips deleting index [{}] even though its retention period [{}] has lapsed " - + "because there's a downsampling operation currently in progress for this index. Current downsampling " - + "status is [{}]. When downsampling completes, DSL will delete this index.", - index.getName(), - retention, - downsampleStatus - ); - } + // We know that there is lifecycle and retention because there are indices to be deleted + assert dataStream.getLifecycle() != null; + TimeValue effectiveDataRetention = dataStream.getLifecycle().getEffectiveDataRetention(); + for (Index index : backingIndicesOlderThanRetention) { + if (indicesToExcludeForRemainingRun.contains(index) == false) { + IndexMetadata backingIndex = metadata.index(index); + assert backingIndex != null : "the data stream backing indices must exist"; + + IndexMetadata.DownsampleTaskStatus downsampleStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndex.getSettings()); + // we don't want to delete the source index if they have an in-progress downsampling operation because the + // target downsample index will remain in the system as a standalone index + if (downsampleStatus.equals(UNKNOWN)) { + indicesToBeRemoved.add(index); + + // there's an opportunity here to batch the delete requests (i.e. delete 100 indices / request) + // let's start simple and reevaluate + String indexName = backingIndex.getIndex().getName(); + deleteIndexOnce(indexName, "the lapsed [" + effectiveDataRetention + "] retention period"); + } else { + // there's an opportunity here to cancel downsampling and delete the source index now + logger.trace( + "Data stream lifecycle skips deleting index [{}] even though its retention period [{}] has lapsed " + + "because there's a downsampling operation currently in progress for this index. Current downsampling " + + "status is [{}]. When downsampling completes, DSL will delete this index.", + index.getName(), + effectiveDataRetention, + downsampleStatus + ); } } } @@ -1222,14 +1224,6 @@ private static boolean isForceMergeComplete(IndexMetadata backingIndex) { return customMetadata != null && customMetadata.containsKey(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY); } - @Nullable - static TimeValue getRetentionConfiguration(DataStream dataStream) { - if (dataStream.getLifecycle() == null) { - return null; - } - return dataStream.getLifecycle().getEffectiveDataRetention(); - } - /** * @return the duration of the last run in millis or null if the service hasn't completed a run yet. 
*/ diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index e7339cc3f334a..d1e07aacaddce 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -151,7 +151,7 @@ public void testLifecycleComposition() { DataStreamLifecycle result = composeDataLifecycles(lifecycles); // Defaults to true assertThat(result.isEnabled(), equalTo(true)); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If the last lifecycle is missing a property (apart from enabled) we keep the latest from the previous ones @@ -165,7 +165,7 @@ public void testLifecycleComposition() { List lifecycles = List.of(lifecycle, new DataStreamLifecycle()); DataStreamLifecycle result = composeDataLifecycles(lifecycles); assertThat(result.isEnabled(), equalTo(true)); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If both lifecycle have all properties, then the latest one overwrites all the others @@ -183,7 +183,7 @@ public void testLifecycleComposition() { List lifecycles = List.of(lifecycle1, lifecycle2); DataStreamLifecycle result = composeDataLifecycles(lifecycles); assertThat(result.isEnabled(), equalTo(lifecycle2.isEnabled())); - assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle2.getEffectiveDataRetention())); + assertThat(result.getDataStreamRetention(), equalTo(lifecycle2.getDataStreamRetention())); assertThat(result.getDownsamplingRounds(), equalTo(lifecycle2.getDownsamplingRounds())); } } diff --git a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java index 465f17eca5532..2b3bab21e8ae6 100644 --- a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java +++ b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java @@ -13,6 +13,7 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; @@ -39,9 +40,15 @@ public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate super(testCandidate); } + @UpdateForV9 // remove restCompat check @ParametersFactory public static Iterable parameters() throws Exception { - return createParameters(); + String restCompatProperty = System.getProperty("tests.restCompat"); + if ("true".equals(restCompatProperty)) { + return createParametersWithLegacyNodeSelectorSupport(); + } else { + return createParameters(); + } } @Override diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml index 1dc35c165b4e0..a000a9eac16ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml @@ -1,8 +1,8 @@ --- "cluster health basic test": - skip: - version: "- 8.3.99" - reason: "health was only added in 8.2.0, and master_is_stable in 8.4.0" + version: "- 8.6.99" + reason: "health was added in 8.2.0, master_is_stable in 8.4.0, and REST API updated in 8.7" - do: health_report: { } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index fc747f401b11d..ef121411d8351 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -87,7 +87,7 @@ --- "Put mappings with explicit _doc type bwc": - skip: - version: "8.0.0 - " #TODO: add "mixed" to skip test for mixed cluster/upgrade tests + version: "8.0.0 - " reason: "old deprecation message for pre 8.0" features: "node_selector" - do: @@ -96,7 +96,7 @@ - do: node_selector: - version: " - 7.99.99" #TODO: OR replace with "non_current" here + version: "original" catch: bad_request indices.put_mapping: index: test_index diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java index 53075e31cd6f9..c9a6cfaf754c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RandomSamplerIT.java @@ -85,6 +85,7 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105839") public void testRandomSamplerConsistentSeed() { double[] sampleMonotonicValue = new double[1]; double[] sampleNumericValue = new double[1]; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index f7b5fec8a2dd5..ce3446317400d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Predicates; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; @@ -545,8 +546,7 @@ private Comparator buildComparator() { private Predicate buildAfterPredicate() { if (after == null) { - // TODO use constant when https://github.com/elastic/elasticsearch/pull/105881 merged - return snapshotInfo -> true; + return Predicates.always(); } assert offset == 0 : "can't combine 
after and offset but saw [" + after + "] and offset [" + offset + "]"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 14de79636be0d..1bcfdba1d16f4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -705,7 +705,7 @@ public DataStream snapshot(Collection indicesInSnapshot) { * is treated differently for the write index (i.e. they first need to be rolled over) */ public List getIndicesPastRetention(Function indexMetadataSupplier, LongSupplier nowSupplier) { - if (lifecycle == null || lifecycle.getEffectiveDataRetention() == null) { + if (lifecycle == null || lifecycle.isEnabled() == false || lifecycle.getEffectiveDataRetention() == null) { return List.of(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 215ed515748ab..b4a3a1eb3502a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -134,6 +134,16 @@ public boolean isEnabled() { */ @Nullable public TimeValue getEffectiveDataRetention() { + return getDataStreamRetention(); + } + + /** + * The least amount of time data the data stream is requesting es to keep the data. + * NOTE: this can be overriden by the {@link DataStreamLifecycle#getEffectiveDataRetention()}. + * @return the time period or null, null represents that data should never be deleted. + */ + @Nullable + public TimeValue getDataStreamRetention() { return dataRetention == null ? null : dataRetention.value; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index ae6185cdcc6b6..0ddcef2ac3a08 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -101,6 +101,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion NEW_INDEXVERSION_FORMAT = def(8_501_00_0, Version.LUCENE_9_9_1); public static final IndexVersion UPGRADE_LUCENE_9_9_2 = def(8_502_00_0, Version.LUCENE_9_9_2); public static final IndexVersion TIME_SERIES_ID_HASHING = def(8_502_00_1, Version.LUCENE_9_9_2); + public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index 1005f8f486beb..e63d5ef87973b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -22,7 +22,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; @@ -296,14 +296,13 @@ private TopDocs searchOperations(FieldDoc after, boolean accurateTotalHits) thro final Query rangeQuery = rangeQuery(Math.max(fromSeqNo, lastSeenSeqNo), toSeqNo, indexVersionCreated); assert accurateTotalHits == false || after == null : "accurate total hits is required by the first batch only"; final SortField sortBySeqNo = new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG); - final TopFieldCollector collector = TopFieldCollector.create( + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager( new Sort(sortBySeqNo), searchBatchSize, after, accurateTotalHits ? Integer.MAX_VALUE : 0 ); - indexSearcher.search(rangeQuery, collector); - return collector.topDocs(); + return indexSearcher.search(rangeQuery, topFieldCollectorManager); } private Translog.Operation readDocAsOp(int docIndex) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java index 708d042c91bf9..e5eeac72927c0 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -161,6 +161,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo( @@ -179,6 +180,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo( @@ -197,6 +199,7 @@ private static class TranslogLeafReader extends LeafReader { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); private static final Set TRANSLOG_FIELD_NAMES = Set.of(SourceFieldMapper.NAME, RoutingFieldMapper.NAME, IdFieldMapper.NAME); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java index 49934776bc4a3..db90c8f052a5e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentLeafReader.java @@ -291,6 +291,7 @@ private static FieldInfo fieldInfo(String name) { 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java index 
3beec89853b76..ce3031d4cddf8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java @@ -43,10 +43,11 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, if (scorer == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + grow(1); scorer.score(new LeafCollector() { @Override public void collect(int doc) throws IOException { - collectBucket(sub, doc, 0); + collectExistingBucket(sub, doc, 0); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java index a279b8270cd57..276e0bbf300d2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregator.java @@ -101,10 +101,11 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt } // No sampling is being done, collect all docs if (probability >= 1.0) { + grow(1); return new LeafBucketCollector() { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - collectBucket(sub, doc, 0); + collectExistingBucket(sub, doc, 0); } }; } @@ -117,11 +118,12 @@ public void collect(int doc, long owningBucketOrd) throws IOException { final DocIdSetIterator docIt = scorer.iterator(); final Bits liveDocs = aggCtx.getLeafReaderContext().reader().getLiveDocs(); try { + grow(1); // Iterate every document provided by the scorer iterator for (int docId = docIt.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docIt.nextDoc()) { // If liveDocs is null, that means that every doc is a live doc, no need to check if it has been deleted or not if (liveDocs == null || liveDocs.get(docIt.docID())) { - collectBucket(sub, docIt.docID(), 0); + collectExistingBucket(sub, docIt.docID(), 0); } } // This collector could throw `CollectionTerminatedException` if the last leaf collector has stopped collecting diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 44dddc119925f..8c40a283844b4 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -17,7 +17,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -203,7 +203,7 @@ private static void executeKnnVectorQuery(SearchContext context) throws IOExcept static DfsKnnResults singleKnnSearch(Query knnQuery, int k, Profilers profilers, ContextIndexSearcher searcher, String nestedPath) throws IOException { - CollectorManager topDocsCollectorManager = TopScoreDocCollector.createSharedManager( + CollectorManager topDocsCollectorManager = new TopScoreDocCollectorManager( k, null, Integer.MAX_VALUE diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java 
b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index 86a01756d247e..7fd09d3ddfdf1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -35,9 +35,9 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; -import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.elasticsearch.action.search.MaxScoreCollector; @@ -413,14 +413,9 @@ private static class WithHits extends QueryPhaseCollectorManager { } } if (sortAndFormats == null) { - this.topDocsManager = TopScoreDocCollector.createSharedManager(numHits, searchAfter, hitCountThreshold); + this.topDocsManager = new TopScoreDocCollectorManager(numHits, searchAfter, hitCountThreshold); } else { - this.topDocsManager = TopFieldCollector.createSharedManager( - sortAndFormats.sort, - numHits, - (FieldDoc) searchAfter, - hitCountThreshold - ); + this.topDocsManager = new TopFieldCollectorManager(sortAndFormats.sort, numHits, (FieldDoc) searchAfter, hitCountThreshold); } } diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java index 091ce6f8a0f6d..05cf52fd23f24 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnByteVectorQuery.java @@ -8,35 +8,17 @@ package org.elasticsearch.search.vectors; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.util.Bits; import org.elasticsearch.search.profile.query.QueryProfiler; -import java.io.IOException; - public class ESKnnByteVectorQuery extends KnnByteVectorQuery implements ProfilingQuery { - private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; private long vectorOpsCount; - private final byte[] target; public ESKnnByteVectorQuery(String field, byte[] target, int k, Query filter) { super(field, target, k, filter); - this.target = target; - } - - @Override - protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException { - // We increment visit limit by one to bypass a fencepost error in the collector - if (visitedLimit < Integer.MAX_VALUE) { - visitedLimit += 1; - } - TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); - return results != null ? 
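Editor's note: the `DfsPhase` and `QueryPhaseCollectorManager` hunks above follow the Lucene 9.10 migration from the deprecated `TopScoreDocCollector.createSharedManager`/`TopFieldCollector.createSharedManager` factories to directly constructed `TopScoreDocCollectorManager`/`TopFieldCollectorManager` instances. The snippet below is an illustrative, self-contained sketch of that Lucene API against a throwaway in-memory index; it is not Elasticsearch code, and the class name is invented for the example.

[source,java]
----------------------------------------------------------
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollectorManager;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class CollectorManagerMigrationSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory()) {
            // Index a single document so the search below has something to match.
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document doc = new Document();
                doc.add(new TextField("field", "hello world", Field.Store.NO));
                writer.addDocument(doc);
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // Pre-9.10 style: TopScoreDocCollector.createSharedManager(10, null, 1000)
                // 9.10 style: construct the manager directly (same arguments: numHits, searchAfter, totalHitsThreshold).
                TopScoreDocCollectorManager manager = new TopScoreDocCollectorManager(10, null, 1000);
                TopDocs topDocs = searcher.search(new TermQuery(new Term("field", "hello")), manager);
                System.out.println("hits: " + topDocs.totalHits.value);
            }
        }
    }
}
----------------------------------------------------------

The sorted case is analogous: `TopFieldCollector.createSharedManager(sort, numHits, after, threshold)` becomes `new TopFieldCollectorManager(sort, numHits, after, threshold)`, and the reduced `TopFieldDocs` is returned directly from `searcher.search(query, manager)`, as the test hunks below show.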
results : NO_RESULTS; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java index 4fa4db1f4ea95..e83a90a3c4df8 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/ESKnnFloatVectorQuery.java @@ -8,24 +8,16 @@ package org.elasticsearch.search.vectors; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.TopDocsCollector; -import org.apache.lucene.util.Bits; import org.elasticsearch.search.profile.query.QueryProfiler; -import java.io.IOException; - public class ESKnnFloatVectorQuery extends KnnFloatVectorQuery implements ProfilingQuery { - private static final TopDocs NO_RESULTS = TopDocsCollector.EMPTY_TOPDOCS; private long vectorOpsCount; - private final float[] target; public ESKnnFloatVectorQuery(String field, float[] target, int k, Query filter) { super(field, target, k, filter); - this.target = target; } @Override @@ -35,16 +27,6 @@ protected TopDocs mergeLeafResults(TopDocs[] perLeafResults) { return topK; } - @Override - protected TopDocs approximateSearch(LeafReaderContext context, Bits acceptDocs, int visitedLimit) throws IOException { - // We increment visit limit by one to bypass a fencepost error in the collector - if (visitedLimit < Integer.MAX_VALUE) { - visitedLimit += 1; - } - TopDocs results = context.reader().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); - return results != null ? results : NO_RESULTS; - } - @Override public void profile(QueryProfiler queryProfiler) { queryProfiler.setVectorOpsCount(vectorOpsCount); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index 1c4cb8c0681ff..9f7d6b49b0844 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -1249,7 +1249,7 @@ public void testGetIndicesPastRetentionWithOriginationDate() { creationAndRolloverTimes, settings(IndexVersion.current()), new DataStreamLifecycle() { - public TimeValue getEffectiveDataRetention() { + public TimeValue getDataStreamRetention() { return testRetentionReference.get(); } } diff --git a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java index 8ad4593602a25..bec0f83f78674 100644 --- a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorSearchAfterTests.java @@ -18,7 +18,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; @@ -107,12 +107,11 @@ private > void assertSearchCollapse( ? 
SinglePassGroupingCollector.createNumeric("field", fieldType, sort, expectedNumGroups, after) : SinglePassGroupingCollector.createKeyword("field", fieldType, sort, expectedNumGroups, after); - TopFieldCollector topFieldCollector = TopFieldCollector.create(sort, totalHits, after, Integer.MAX_VALUE); + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager(sort, totalHits, after, Integer.MAX_VALUE); Query query = new MatchAllDocsQuery(); searcher.search(query, collapsingCollector); - searcher.search(query, topFieldCollector); + TopFieldDocs topDocs = searcher.search(query, topFieldCollectorManager); TopFieldGroups collapseTopFieldDocs = collapsingCollector.getTopGroups(0); - TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(sortField.getField(), collapseTopFieldDocs.field); assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); diff --git a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java index 8dd7ed9c21896..bb4b3f42fde85 100644 --- a/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/grouping/SinglePassGroupingCollectorTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.search.TopFieldCollector; +import org.apache.lucene.search.TopFieldCollectorManager; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; @@ -132,12 +132,11 @@ private > void assertSearchCollapse( ); } - TopFieldCollector topFieldCollector = TopFieldCollector.create(sort, totalHits, Integer.MAX_VALUE); + TopFieldCollectorManager topFieldCollectorManager = new TopFieldCollectorManager(sort, totalHits, Integer.MAX_VALUE); Query query = new MatchAllDocsQuery(); searcher.search(query, collapsingCollector); - searcher.search(query, topFieldCollector); + TopFieldDocs topDocs = searcher.search(query, topFieldCollectorManager); TopFieldGroups collapseTopFieldDocs = collapsingCollector.getTopGroups(0); - TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(collapseField.getField(), collapseTopFieldDocs.field); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java index 5cfe368a9a392..fc8b9706d387a 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/ProfileCollectorManagerTests.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.search.DummyTotalHitCountCollector; @@ -121,12 +122,12 @@ public Integer reduce(Collection collectors) { */ public void 
testManagerWithSearcher() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(10, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(10, null, 1000); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(10, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(10, null, 1000); String profileReason = "profiler_reason"; ProfileCollectorManager profileCollectorManager = new ProfileCollectorManager<>(topDocsManager, profileReason); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), profileCollectorManager); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java index b466101be07d8..f222e697488d2 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseCollectorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TopScoreDocCollectorManager; import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.Directory; @@ -108,7 +109,7 @@ public void testNegativeTerminateAfter() { public void testTopDocsOnly() throws IOException { { - CollectorManager topScoreDocManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topScoreDocManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topScoreDocManager, null, @@ -121,7 +122,7 @@ public void testTopDocsOnly() throws IOException { assertEquals(numDocs, result.topDocs.totalHits.value); } { - CollectorManager topScoreDocManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topScoreDocManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topScoreDocManager, null, @@ -137,7 +138,7 @@ public void testTopDocsOnly() throws IOException { public void testWithAggs() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -152,7 +153,7 @@ public void testWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -170,7 +171,7 @@ public void testWithAggs() throws IOException { public void testPostFilterTopDocsOnly() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); TermQuery termQuery = new TermQuery(new Term("field2", 
"value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); CollectorManager> manager = createCollectorManager( @@ -185,7 +186,7 @@ public void testPostFilterTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); TermQuery termQuery = new TermQuery(new Term("field1", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); CollectorManager> manager = createCollectorManager( @@ -203,7 +204,7 @@ public void testPostFilterTopDocsOnly() throws IOException { public void testPostFilterWithAggs() throws IOException { { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); TermQuery termQuery = new TermQuery(new Term("field1", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); @@ -220,7 +221,7 @@ public void testPostFilterWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); @@ -247,18 +248,14 @@ public void testMinScoreTopDocsOnly() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField2Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -271,7 +268,7 @@ public void testMinScoreTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -284,7 +281,7 @@ public void testMinScoreTopDocsOnly() throws IOException { assertEquals(numDocs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -306,18 +303,14 @@ public void testMinScoreWithAggs() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), 
BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField2Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -333,7 +326,7 @@ public void testMinScoreWithAggs() throws IOException { assertEquals(numField2Docs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -348,7 +341,7 @@ public void testMinScoreWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -374,18 +367,14 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField3Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -398,7 +387,7 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { assertEquals(numField2AndField3Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -411,7 +400,7 @@ public void testPostFilterAndMinScoreTopDocsOnly() throws IOException { assertEquals(numField2Docs, result.topDocs.totalHits.value); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -435,18 +424,14 @@ public void testPostFilterAndMinScoreWithAggs() throws 
IOException { TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; thresholdScore = topDocs.scoreDocs[numField3Docs].score; } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -461,7 +446,7 @@ public void testPostFilterAndMinScoreWithAggs() throws IOException { assertEquals(numField3Docs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -476,7 +461,7 @@ public void testPostFilterAndMinScoreWithAggs() throws IOException { assertEquals(numDocs, result.aggs.intValue()); } { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -635,18 +620,14 @@ public void testTerminateAfterTopDocsOnlyWithMinScore() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, null, @@ -667,18 +648,14 @@ public void testTerminateAfterWithAggsAndMinScore() throws IOException { .add(new BoostQuery(new TermQuery(new Term("field2", "value")), 200f), BooleanClause.Occur.SHOULD) .build(); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField2Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField2Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 
1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, @@ -703,18 +680,14 @@ public void testTerminateAfterAndPostFilterAndMinScoreTopDocsOnly() throws IOExc TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2AndField3Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager> manager = createCollectorManager( topDocsManager, filterWeight, @@ -737,18 +710,14 @@ public void testTerminateAfterAndPostFilterAndMinScoreWithAggs() throws IOExcept TermQuery termQuery = new TermQuery(new Term("field2", "value")); Weight filterWeight = termQuery.createWeight(searcher, ScoreMode.TOP_DOCS, 1f); { - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager( - numField3Docs + 1, - null, - 1000 - ); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(numField3Docs + 1, null, 1000); TopDocs topDocs = searcher.search(booleanQuery, topDocsManager); assertEquals(numDocs, topDocs.totalHits.value); maxScore = topDocs.scoreDocs[0].score; } { int terminateAfter = randomIntBetween(1, numField2AndField3Docs - 1); - CollectorManager topDocsManager = TopScoreDocCollector.createSharedManager(1, null, 1000); + CollectorManager topDocsManager = new TopScoreDocCollectorManager(1, null, 1000); CollectorManager aggsManager = DummyTotalHitCountCollector.createManager(); CollectorManager> manager = createCollectorManager( topDocsManager, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index d1a07cd0ee089..d4c6f8f3df873 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -146,7 +146,8 @@ public FieldInfo getFieldInfoWithName(String name) { 1, VectorEncoding.BYTE, VectorSimilarityFunction.COSINE, - randomBoolean() + randomBoolean(), + false ); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 4be9481df58b1..7d8d1175385a1 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.ClasspathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.TestFeatureService; @@ 
-35,8 +36,10 @@ import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite; +import org.elasticsearch.test.rest.yaml.section.DoSection; import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.junit.AfterClass; @@ -61,6 +64,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -230,6 +234,28 @@ public static void closeClient() throws IOException { } } + /** + * Create parameters for this parameterized test. + * Enables support for parsing the legacy version-based node_selector format. + */ + @Deprecated + @UpdateForV9 + public static Iterable createParametersWithLegacyNodeSelectorSupport() throws Exception { + var executableSectionRegistry = new NamedXContentRegistry( + Stream.concat( + ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS.stream().filter(entry -> entry.name.getPreferredName().equals("do") == false), + Stream.of( + new NamedXContentRegistry.Entry( + ExecutableSection.class, + new ParseField("do"), + DoSection::parseWithLegacyNodeSelectorSupport + ) + ) + ).toList() + ); + return createParameters(executableSectionRegistry, null); + } + /** * Create parameters for this parameterized test. Uses the * {@link ExecutableSection#XCONTENT_REGISTRY list} of executable sections diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 65a23bd376212..e5f46ff135171 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -177,7 +177,7 @@ private static Stream validateExecutableSections( .filter(section -> false == section.getExpectedWarningHeaders().isEmpty()) .filter(section -> false == hasYamlRunnerFeature("warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())); @@ -190,7 +190,7 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [warnings_regex] section without a corresponding \ - ["skip": "features": "warnings_regex"] so runners that do not support the [warnings_regex] \ + ["requires": "test_runner_features": "warnings_regex"] so runners that do not support the [warnings_regex] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -204,7 +204,7 @@ private static Stream validateExecutableSections( .filter(section -> 
false == hasYamlRunnerFeature("allowed_warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings] section without a corresponding \ - ["skip": "features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ + ["requires": "test_runner_features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -218,8 +218,8 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("allowed_warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding \ - ["skip": "features": "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] \ - section can skip the test at line [%d]\ + ["requires": "test_runner_features": "allowed_warnings_regex"] so runners that do not support the \ + [allowed_warnings_regex] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -232,7 +232,7 @@ private static Stream validateExecutableSections( .filter(section -> false == hasYamlRunnerFeature("node_selector", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [node_selector] section without a corresponding \ - ["skip": "features": "node_selector"] so runners that do not support the [node_selector] section \ + ["requires": "test_runner_features": "node_selector"] so runners that do not support the [node_selector] section \ can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -243,7 +243,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof ContainsAssertion) .filter(section -> false == hasYamlRunnerFeature("contains", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -256,8 +256,9 @@ private static Stream validateExecutableSections( .filter(section -> false == section.getApiCallSection().getHeaders().isEmpty()) .filter(section -> false == hasYamlRunnerFeature("headers", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ - so runners that do not support the [headers] section can skip the test at line [%d]\ + attempted to add a [do] with a [headers] section without a corresponding \ + ["requires": "test_runner_features": "headers"] so runners that do not support the [headers] section \ + can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -267,7 +268,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof CloseToAssertion) .filter(section -> false == hasYamlRunnerFeature("close_to", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [close_to] assertion without a 
corresponding ["skip": "features": "close_to"] \ + attempted to add a [close_to] assertion without a corresponding ["requires": "test_runner_features": "close_to"] \ so runners that do not support the [close_to] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -278,7 +279,7 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof IsAfterAssertion) .filter(section -> false == hasYamlRunnerFeature("is_after", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add an [is_after] assertion without a corresponding ["skip": "features": "is_after"] \ + attempted to add an [is_after] assertion without a corresponding ["requires": "test_runner_features": "is_after"] \ so runners that do not support the [is_after] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 4155472b42640..e850ade2bdf1d 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.Node; @@ -86,6 +87,16 @@ */ public class DoSection implements ExecutableSection { public static DoSection parse(XContentParser parser) throws IOException { + return parse(parser, false); + } + + @UpdateForV9 + @Deprecated + public static DoSection parseWithLegacyNodeSelectorSupport(XContentParser parser) throws IOException { + return parse(parser, true); + } + + private static DoSection parse(XContentParser parser, boolean enableLegacyNodeSelectorSupport) throws IOException { String currentFieldName = null; XContentParser.Token token; @@ -175,7 +186,7 @@ public static DoSection parse(XContentParser parser) throws IOException { if (token == XContentParser.Token.FIELD_NAME) { selectorName = parser.currentName(); } else { - NodeSelector newSelector = buildNodeSelector(selectorName, parser); + NodeSelector newSelector = buildNodeSelector(selectorName, parser, enableLegacyNodeSelectorSupport); nodeSelector = nodeSelector == NodeSelector.ANY ? 
newSelector : new ComposeNodeSelector(nodeSelector, newSelector); @@ -610,10 +621,11 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, ) ); - private static NodeSelector buildNodeSelector(String name, XContentParser parser) throws IOException { + private static NodeSelector buildNodeSelector(String name, XContentParser parser, boolean enableLegacyVersionSupport) + throws IOException { return switch (name) { case "attribute" -> parseAttributeValuesSelector(parser); - case "version" -> parseVersionSelector(parser); + case "version" -> parseVersionSelector(parser, enableLegacyVersionSupport); default -> throw new XContentParseException(parser.getTokenLocation(), "unknown node_selector [" + name + "]"); }; } @@ -678,14 +690,31 @@ private static boolean matchWithRange( } } - private static NodeSelector parseVersionSelector(XContentParser parser) throws IOException { + private static NodeSelector parseVersionSelector(XContentParser parser, boolean enableLegacyVersionSupport) throws IOException { if (false == parser.currentToken().isValue()) { throw new XContentParseException(parser.getTokenLocation(), "expected [version] to be a value"); } - var acceptedVersionRange = VersionRange.parseVersionRanges(parser.text()); - final Predicate nodeMatcher = nodeVersion -> matchWithRange(nodeVersion, acceptedVersionRange, parser.getTokenLocation()); - final String versionSelectorString = "version ranges " + acceptedVersionRange; + final Predicate nodeMatcher; + final String versionSelectorString; + if (parser.text().equals("current")) { + nodeMatcher = nodeVersion -> Build.current().version().equals(nodeVersion); + versionSelectorString = "version is " + Build.current().version() + " (current)"; + } else if (parser.text().equals("original")) { + nodeMatcher = nodeVersion -> Build.current().version().equals(nodeVersion) == false; + versionSelectorString = "version is not current (original)"; + } else { + if (enableLegacyVersionSupport) { + var acceptedVersionRange = VersionRange.parseVersionRanges(parser.text()); + nodeMatcher = nodeVersion -> matchWithRange(nodeVersion, acceptedVersionRange, parser.getTokenLocation()); + versionSelectorString = "version ranges " + acceptedVersionRange; + } else { + throw new XContentParseException( + parser.getTokenLocation(), + "unknown version selector [" + parser.text() + "]. Only [current] and [original] are allowed." 
+ ); + } + } return new NodeSelector() { @Override diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java index 7f65a29e510b6..f4c9aaa619911 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; import org.elasticsearch.xcontent.XContentLocation; @@ -17,7 +18,9 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.function.Predicate; /** @@ -34,9 +37,13 @@ public class PrerequisiteSection { static class PrerequisiteSectionBuilder { String skipVersionRange = null; String skipReason = null; + String requiresReason = null; List requiredYamlRunnerFeatures = new ArrayList<>(); List skipOperatingSystems = new ArrayList<>(); + Set skipClusterFeatures = new HashSet<>(); + Set requiredClusterFeatures = new HashSet<>(); + enum XPackRequired { NOT_SPECIFIED, YES, @@ -56,6 +63,11 @@ public PrerequisiteSectionBuilder setSkipReason(String skipReason) { return this; } + public PrerequisiteSectionBuilder setRequiresReason(String requiresReason) { + this.requiresReason = requiresReason; + return this; + } + public PrerequisiteSectionBuilder requireYamlRunnerFeature(String featureName) { requiredYamlRunnerFeatures.add(featureName); return this; @@ -79,6 +91,16 @@ public PrerequisiteSectionBuilder skipIfXPack() { return this; } + public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { + skipClusterFeatures.add(featureName); + return this; + } + + public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { + requiredClusterFeatures.add(featureName); + return this; + } + public PrerequisiteSectionBuilder skipIfOs(String osName) { this.skipOperatingSystems.add(osName); return this; @@ -88,7 +110,9 @@ void validate(XContentLocation contentLocation) { if ((Strings.hasLength(skipVersionRange) == false) && requiredYamlRunnerFeatures.isEmpty() && skipOperatingSystems.isEmpty() - && xpackRequired == XPackRequired.NOT_SPECIFIED) { + && xpackRequired == XPackRequired.NOT_SPECIFIED + && requiredClusterFeatures.isEmpty() + && skipClusterFeatures.isEmpty()) { throw new ParsingException( contentLocation, "at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section" @@ -100,6 +124,12 @@ void validate(XContentLocation contentLocation) { if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); } + if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + } + if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + } // make feature "skip_os" 
mandatory if os is given, this is a temporary solution until language client tests know about os if (skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); @@ -107,6 +137,9 @@ void validate(XContentLocation contentLocation) { if (xpackRequired == XPackRequired.MISMATCHED) { throw new ParsingException(contentLocation, "either [xpack] or [no_xpack] can be present, not both"); } + if (Sets.haveNonEmptyIntersection(skipClusterFeatures, requiredClusterFeatures)) { + throw new ParsingException(contentLocation, "a cluster feature can be specified either in [requires] or [skip], not both"); + } } public PrerequisiteSection build() { @@ -131,8 +164,14 @@ public PrerequisiteSection build() { if (skipOperatingSystems.isEmpty() == false) { skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); } + if (requiredClusterFeatures.isEmpty() == false) { + requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); + } + if (skipClusterFeatures.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); + } } - return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, null, requiredYamlRunnerFeatures); + return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures); } } @@ -160,6 +199,10 @@ static PrerequisiteSectionBuilder parseInternal(XContentParser parser) throws IO parseSkipSection(parser, builder); hasPrerequisiteSection = true; maybeAdvanceToNextField(parser); + } else if ("requires".equals(parser.currentName())) { + parseRequiresSection(parser, builder); + hasPrerequisiteSection = true; + maybeAdvanceToNextField(parser); } else { unknownFieldName = true; } @@ -209,6 +252,8 @@ static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder b parseFeatureField(parser.text(), builder); } else if ("os".equals(currentFieldName)) { builder.skipIfOs(parser.text()); + } else if ("cluster_features".equals(currentFieldName)) { + builder.skipIfClusterFeature(parser.text()); } else { throw new ParsingException( parser.getTokenLocation(), @@ -224,6 +269,54 @@ static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder b while (parser.nextToken() != XContentParser.Token.END_ARRAY) { builder.skipIfOs(parser.text()); } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.skipIfClusterFeature(parser.text()); + } + } + } + } + parser.nextToken(); + } + + static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException( + "Expected [" + + XContentParser.Token.START_OBJECT + + ", found [" + + parser.currentToken() + + "], the requires section is not properly indented" + ); + } + String currentFieldName = null; + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if ("reason".equals(currentFieldName)) { + builder.setRequiresReason(parser.text()); + } else if ("test_runner_features".equals(currentFieldName)) { + parseFeatureField(parser.text(), 
builder); + } else if ("cluster_features".equals(currentFieldName)) { + builder.requireClusterFeature(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "field " + currentFieldName + " not supported within requires section" + ); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("test_runner_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + parseFeatureField(parser.text(), builder); + } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.requireClusterFeature(parser.text()); + } } } } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index edc043e15527d..1f5bdc71dde37 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -468,6 +468,41 @@ public void testParseSkipOs() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true)); } + public void testParseSkipAndRequireClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + "Broken on some os": + + - skip: + cluster_features: [unsupported-feature1, unsupported-feature2] + reason: "unsupported-features are not supported" + - requires: + cluster_features: required-feature1 + reason: "required-feature1 is required" + - do: + indices.get_mapping: + index: test_index + type: test_type + + - match: {test_type.properties.text.type: string} + - match: {test_type.properties.text.analyzer: whitespace} + """); + + ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Broken on some os")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat( + restTestSuite.getTestSections().get(0).getPrerequisiteSection().skipReason, + equalTo("unsupported-features are not supported") + ); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().requireReason, equalTo("required-feature1 is required")); + } + public void testParseFileWithSingleTestSection() throws Exception { final Path filePath = createTempFile("tyf", ".yml"); Files.writeString(filePath, """ @@ -541,7 +576,7 @@ public void testAddingDoWithWarningWithoutSkipWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, lineNumber))); } @@ -555,7 +590,8 
@@ public void testAddingDoWithWarningRegexWithoutSkipWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings_regex] section without a corresponding ["skip": "features": "warnings_regex"] \ + attempted to add a [do] with a [warnings_regex] section without a corresponding \ + ["requires": "test_runner_features": "warnings_regex"] \ so runners that do not support the [warnings_regex] section can skip the test at line [%d]\ """, lineNumber))); } @@ -569,7 +605,7 @@ public void testAddingDoWithAllowedWarningWithoutSkipAllowedWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings] section without a corresponding ["skip": "features": \ + attempted to add a [do] with a [allowed_warnings] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings"] so runners that do not support the [allowed_warnings] section can skip the test at \ line [%d]\ """, lineNumber))); @@ -584,7 +620,7 @@ public void testAddingDoWithAllowedWarningRegexWithoutSkipAllowedWarnings() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding ["skip": "features": \ + attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] section can skip the test \ at line [%d]\ """, lineNumber))); @@ -600,7 +636,7 @@ public void testAddingDoWithHeaderWithoutSkipHeaders() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ + attempted to add a [do] with a [headers] section without a corresponding ["requires": "test_runner_features": "headers"] \ so runners that do not support the [headers] section can skip the test at line [%d]\ """, lineNumber))); } @@ -615,7 +651,8 @@ public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] \ + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, lineNumber))); } @@ -631,7 +668,7 @@ public void testAddingContainsWithoutSkipContains() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test 
at line [%d]\ """, lineNumber))); } @@ -683,13 +720,15 @@ public void testMultipleValidationErrors() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertEquals(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] so runners that \ - do not support the [contains] assertion can skip the test at line [%d], - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] so runners \ - that do not support the [warnings] section can skip the test at line [%d], - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] so \ - runners that do not support the [node_selector] section can skip the test \ - at line [%d]\ + attempted to add a [contains] assertion without a corresponding \ + ["requires": "test_runner_features": "contains"] \ + so runners that do not support the [contains] assertion can skip the test at line [%d], + attempted to add a [do] with a [warnings] section without a corresponding \ + ["requires": "test_runner_features": "warnings"] \ + so runners that do not support the [warnings] section can skip the test at line [%d], + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ + so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, firstLineNumber, secondLineNumber, thirdLineNumber), e.getMessage()); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 0cb9a3e29e63f..7d9557d29e568 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -10,12 +10,12 @@ import org.apache.http.HttpHost; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.xcontent.XContentLocation; @@ -579,14 +579,15 @@ public void testParseDoSectionAllowedWarnings() throws Exception { assertThat(e.getMessage(), equalTo("the warning [foo] was both allowed and expected")); } - public void testNodeSelectorByVersionRange() throws IOException { + @UpdateForV9 // remove + public void testLegacyNodeSelectorByVersionRange() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: version: 5.2.0-6.0.0 indices.get_field_mapping: index: test_index"""); - DoSection doSection = DoSection.parse(parser); + DoSection doSection = DoSection.parseWithLegacyNodeSelectorSupport(parser); assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); Node v170 = nodeWithVersion("1.7.0"); Node v521 = nodeWithVersion("5.2.1"); @@ -629,26 +630,21 @@ public void testNodeSelectorByVersionRange() throws IOException { } } - public void testNodeSelectorByVersionRangeFailsWithNonSemanticVersion() throws 
IOException { + public void testNodeSelectorByVersionRangeFails() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: version: 5.2.0-6.0.0 indices.get_field_mapping: index: test_index"""); - DoSection doSection = DoSection.parse(parser); - assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); - Node nonSemantic = nodeWithVersion("abddef"); - List nodes = new ArrayList<>(); + var exception = expectThrows(XContentParseException.class, () -> DoSection.parse(parser)); + assertThat(exception.getMessage(), endsWith("unknown version selector [5.2.0-6.0.0]. Only [current] and [original] are allowed.")); - var exception = expectThrows( - XContentParseException.class, - () -> doSection.getApiCallSection().getNodeSelector().select(List.of(nonSemantic)) - ); - assertThat( - exception.getMessage(), - endsWith("[version] range node selector expects a semantic version format (x.y.z), but found abddef") - ); + // We are throwing an early exception - this means the parser content is not fully consumed. This is OK as it would make + // the tests fail pointing to the correct syntax error location, preventing any further use of parser. + // Explicitly close the parser to avoid AbstractClientYamlTestFragmentParserTestCase checks. + parser.close(); + parser = null; } public void testNodeSelectorCurrentVersion() throws IOException { @@ -663,16 +659,36 @@ public void testNodeSelectorCurrentVersion() throws IOException { Node v170 = nodeWithVersion("1.7.0"); Node v521 = nodeWithVersion("5.2.1"); Node v550 = nodeWithVersion("5.5.0"); - Node oldCurrent = nodeWithVersion(Version.CURRENT.toString()); - Node newCurrent = nodeWithVersion(Build.current().version()); + Node current = nodeWithVersion(Build.current().version()); + List nodes = new ArrayList<>(); + nodes.add(v170); + nodes.add(v521); + nodes.add(v550); + nodes.add(current); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(List.of(current), nodes); + } + + public void testNodeSelectorNonCurrentVersion() throws IOException { + parser = createParser(YamlXContent.yamlXContent, """ + node_selector: + version: original + indices.get_field_mapping: + index: test_index"""); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node v170 = nodeWithVersion("1.7.0"); + Node v521 = nodeWithVersion("5.2.1"); + Node v550 = nodeWithVersion("5.5.0"); + Node current = nodeWithVersion(Build.current().version()); List nodes = new ArrayList<>(); nodes.add(v170); nodes.add(v521); nodes.add(v550); - nodes.add(oldCurrent); - nodes.add(newCurrent); + nodes.add(current); doSection.getApiCallSection().getNodeSelector().select(nodes); - assertEquals(List.of(oldCurrent, newCurrent), nodes); + assertEquals(List.of(v170, v521, v550), nodes); } private static Node nodeWithVersion(String version) { @@ -741,7 +757,7 @@ private static Node nodeWithAttributes(Map> attributes) { public void testNodeSelectorByTwoThings() throws IOException { parser = createParser(YamlXContent.yamlXContent, """ node_selector: - version: 5.2.0-6.0.0 + version: current attribute: attr: val indices.get_field_mapping: @@ -749,9 +765,9 @@ public void testNodeSelectorByTwoThings() throws IOException { DoSection doSection = DoSection.parse(parser); assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); - Node both = nodeWithVersionAndAttributes("5.2.1", singletonMap("attr", singletonList("val"))); + Node both = 
nodeWithVersionAndAttributes(Build.current().version(), singletonMap("attr", singletonList("val"))); Node badVersion = nodeWithVersionAndAttributes("5.1.1", singletonMap("attr", singletonList("val"))); - Node badAttr = nodeWithVersionAndAttributes("5.2.1", singletonMap("notattr", singletonList("val"))); + Node badAttr = nodeWithVersionAndAttributes(Build.current().version(), singletonMap("notattr", singletonList("val"))); List nodes = new ArrayList<>(); nodes.add(both); nodes.add(badVersion); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java index b02658694d82f..181ec34fefb7e 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java @@ -363,8 +363,10 @@ public void testParseSkipSectionOsListNoVersion() throws Exception { public void testParseSkipSectionOsListTestFeaturesInRequires() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ + - requires: + test_runner_features: skip_os + reason: skip_os is needed for skip based on os - skip: - features: [skip_os] os: [debian-9,windows-95,ms-dos] reason: see gh#xyz """); @@ -391,6 +393,95 @@ public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { assertThat(e.getMessage(), is("if os is specified, test runner feature [skip_os] must be set")); } + public void testParseRequireSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: needed-feature + reason: test skipped when cluster lacks needed-feature + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseRequiresSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.requiresReason, is("test skipped when cluster lacks needed-feature")); + } + + public void testParseSkipSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: undesired-feature + reason: test skipped when undesired-feature is present + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.skipReason, is("test skipped when undesired-feature is present")); + } + + public void testParseRequireAndSkipSectionsClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: needed-feature + reason: test needs needed-feature to run + - skip: + cluster_features: undesired-feature + reason: test cannot run when undesired-feature are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + 
assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.skipReason, is("test cannot run when undesired-feature are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs needed-feature to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseRequireAndSkipSectionMultipleClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: [needed-feature-1, needed-feature-2] + reason: test needs some to run + - skip: + cluster_features: [undesired-feature-1, undesired-feature-2] + reason: test cannot run when some are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, containsInAnyOrder("undesired-feature-1", "undesired-feature-2")); + assertThat(skipSectionBuilder.requiredClusterFeatures, containsInAnyOrder("needed-feature-1", "needed-feature-2")); + assertThat(skipSectionBuilder.skipReason, is("test cannot run when some are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs some to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseSameRequireAndSkipClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: some-feature + reason: test needs some-feature to run + - skip: + cluster_features: some-feature + reason: test cannot run with some-feature + """); + + var e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), is("a cluster feature can be specified either in [requires] or [skip], not both")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + public void testSkipClusterFeaturesAllRequiredMatch() { PrerequisiteSection section = new PrerequisiteSection( emptyList(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java index c332694d93975..093ec031d0b30 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshot.java @@ -262,7 +262,8 @@ private SegmentCommitInfo syncSegment( 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, - fieldInfo.isSoftDeletesField() + fieldInfo.isSoftDeletesField(), + fieldInfo.isParentField() ) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java index 9231dfb744a36..eb4db6c24507d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java @@ -95,21 +95,18 @@ public static Decision shouldFilter( ); if (tier.isPresent()) { String tierName = tier.get(); - if (allocationAllowed(tierName, node)) { - if (allocation.debugDecision()) { - return debugYesAllowed(allocation, tierPreference, tierName); - } - return Decision.YES; + assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; + if (node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName())) { + return allocation.debugDecision() + ? debugYesAllowed(allocation, tierPreference, DiscoveryNodeRole.DATA_ROLE.roleName()) + : Decision.YES; } - if (allocation.debugDecision()) { - return debugNoRequirementsNotMet(allocation, tierPreference, tierName); + if (node.hasRole(tierName)) { + return allocation.debugDecision() ? debugYesAllowed(allocation, tierPreference, tierName) : Decision.YES; } - return Decision.NO; + return allocation.debugDecision() ? debugNoRequirementsNotMet(allocation, tierPreference, tierName) : Decision.NO; } - if (allocation.debugDecision()) { - return debugNoNoNodesAvailable(allocation, tierPreference); - } - return Decision.NO; + return allocation.debugDecision() ? debugNoNoNodesAvailable(allocation, tierPreference) : Decision.NO; } private static Decision debugNoNoNodesAvailable(RoutingAllocation allocation, List tierPreference) { @@ -278,11 +275,6 @@ static boolean tierNodesPresentConsideringRemovals(String singleTier, DiscoveryN return false; } - public static boolean allocationAllowed(String tierName, DiscoveryNode node) { - assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; - return node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName()) || node.hasRole(tierName); - } - public static boolean allocationAllowed(String tierName, Set roles) { assert Strings.hasText(tierName) : "tierName must be not null and non-empty, but was [" + tierName + "]"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java index 7134ceba475fe..7ed57ca93adf0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java @@ -202,7 +202,16 @@ public void testIndexPrefer() { ) ); } + } + { + final var state = clusterStateWithIndexAndNodes("data_warm", DiscoveryNodes.builder().add(DATA_NODE).build(), null); + assertAllocationDecision( + state, + DATA_NODE, + Decision.Type.YES, + "index has a preference for tiers [data_warm] and node has tier [data]" + ); } } diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java index bdb10ea65dc1b..8c1e47c29670a 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/FieldExtractorIT.java @@ -9,13 +9,11 @@ import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.test.TestClustersThreadFilter; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.FieldExtractorTestCase; import org.junit.ClassRule; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105837") @ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class FieldExtractorIT extends FieldExtractorTestCase { @ClassRule diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java index 3f8caa3bdf5d4..39c21651a7e02 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/FieldExtractorTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matcher; +import org.junit.Before; import java.io.IOException; import java.math.BigDecimal; @@ -57,6 +58,14 @@ public abstract class FieldExtractorTestCase extends ESRestTestCase { private static final Logger logger = LogManager.getLogger(FieldExtractorTestCase.class); + @Before + public void notOld() { + assumeTrue( + "support changed pretty radically in 8.12 so we don't test against 8.11", + getCachedNodesVersions().stream().allMatch(v -> Version.fromString(v).onOrAfter(Version.V_8_12_0)) + ); + } + public void testTextField() throws IOException { textTest().test(randomAlphaOfLength(20)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index f773904ed8973..55e8ba164ba70 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -77,6 +77,7 @@ public void testMatchAll() throws IOException { testCase(new SingleValueQuery(new MatchAll(Source.EMPTY), "foo").asBuilder(), false, false, this::runCase); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105952") public void testMatchSome() throws IOException { int max = between(1, 100); testCase( @@ -137,6 +138,7 @@ public void testNotMatchNone() throws IOException { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105918") public void testNotMatchSome() throws IOException { int max = between(1, 100); testCase( diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index ec9fad3e5077d..6d34fb0eced79 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import 
org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -395,7 +396,7 @@ public void testILMWaitsForTimeSeriesEndTimeToLapse() throws Exception { }, 30, TimeUnit.SECONDS); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103981") + @TestLogging(value = "org.elasticsearch.xpack.ilm:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/103981") public void testRollupNonTSIndex() throws Exception { createIndex(index, alias, false); index(client(), index, true, null, "@timestamp", "2020-01-01T05:10:00Z", "volume", 11.0, "metricset", randomAlphaOfLength(5)); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java index 637fbc8f8bf82..b9c58f728d1e3 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java @@ -238,7 +238,7 @@ public void testIndexTemplateSwapsILMForDataStreamLifecycle() throws Exception { // let's migrate this data stream to use the custom data stream lifecycle client().execute( PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getEffectiveDataRetention()) + new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention()) ).actionGet(); assertBusy(() -> { @@ -580,7 +580,7 @@ public void testUpdateIndexTemplateToDataStreamLifecyclePreference() throws Exce // let's migrate this data stream to use the custom data stream lifecycle client().execute( PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getEffectiveDataRetention()) + new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, customLifecycle.getDataStreamRetention()) ).actionGet(); // data stream was rolled over and has 4 indices, 2 managed by ILM, and 2 managed by the custom data stream lifecycle diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index ede57764a0813..d44d2181f0ce8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -121,9 +121,8 @@ public void putDatafeed( final RoleDescriptor.IndicesPrivileges.Builder indicesPrivilegesBuilder = RoleDescriptor.IndicesPrivileges.builder() .indices(indices); - ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, request, r, state, threadPool, listener), - listener::onFailure + ActionListener privResponseListener = listener.delegateFailureAndWrap( + (l, r) -> handlePrivsResponse(username, request, r, state, threadPool, l) ); ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap(response -> { @@ -173,15 +172,14 @@ public void getDatafeeds( 
request.getDatafeedId(), request.allowNoMatch(), parentTaskId, - ActionListener.wrap( - datafeedBuilders -> listener.onResponse( + listener.delegateFailureAndWrap( + (l, datafeedBuilders) -> l.onResponse( new QueryPage<>( datafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()), datafeedBuilders.size(), DatafeedConfig.RESULTS_FIELD ) - ), - listener::onFailure + ) ) ); } @@ -222,10 +220,7 @@ public void updateDatafeed( request.getUpdate(), headers, jobConfigProvider::validateDatafeedJob, - ActionListener.wrap( - updatedConfig -> listener.onResponse(new PutDatafeedAction.Response(updatedConfig)), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, updatedConfig) -> l.onResponse(new PutDatafeedAction.Response(updatedConfig))) ); }); @@ -254,19 +249,18 @@ public void deleteDatafeed(DeleteDatafeedAction.Request request, ClusterState st String datafeedId = request.getDatafeedId(); - datafeedConfigProvider.getDatafeedConfig(datafeedId, null, ActionListener.wrap(datafeedConfigBuilder -> { + datafeedConfigProvider.getDatafeedConfig(datafeedId, null, listener.delegateFailureAndWrap((delegate, datafeedConfigBuilder) -> { String jobId = datafeedConfigBuilder.build().getJobId(); JobDataDeleter jobDataDeleter = new JobDataDeleter(client, jobId); jobDataDeleter.deleteDatafeedTimingStats( - ActionListener.wrap( - unused1 -> datafeedConfigProvider.deleteDatafeedConfig( + delegate.delegateFailureAndWrap( + (l, unused1) -> datafeedConfigProvider.deleteDatafeedConfig( datafeedId, - ActionListener.wrap(unused2 -> listener.onResponse(AcknowledgedResponse.TRUE), listener::onFailure) - ), - listener::onFailure + l.delegateFailureAndWrap((ll, unused2) -> ll.onResponse(AcknowledgedResponse.TRUE)) + ) ) ); - }, listener::onFailure)); + })); } @@ -316,7 +310,7 @@ private void putDatafeed( CheckedConsumer mappingsUpdated = ok -> datafeedConfigProvider.putDatafeedConfig( request.getDatafeed(), headers, - ActionListener.wrap(response -> listener.onResponse(new PutDatafeedAction.Response(response.v1())), listener::onFailure) + listener.delegateFailureAndWrap((l, response) -> l.onResponse(new PutDatafeedAction.Response(response.v1()))) ); CheckedConsumer validationOk = ok -> { @@ -345,16 +339,19 @@ private void putDatafeed( } private void checkJobDoesNotHaveADatafeed(String jobId, ActionListener listener) { - datafeedConfigProvider.findDatafeedIdsForJobIds(Collections.singletonList(jobId), ActionListener.wrap(datafeedIds -> { - if (datafeedIds.isEmpty()) { - listener.onResponse(Boolean.TRUE); - } else { - listener.onFailure( - ExceptionsHelper.conflictStatusException( - "A datafeed [" + datafeedIds.iterator().next() + "] already exists for job [" + jobId + "]" - ) - ); - } - }, listener::onFailure)); + datafeedConfigProvider.findDatafeedIdsForJobIds( + Collections.singletonList(jobId), + listener.delegateFailureAndWrap((delegate, datafeedIds) -> { + if (datafeedIds.isEmpty()) { + delegate.onResponse(Boolean.TRUE); + } else { + delegate.onFailure( + ExceptionsHelper.conflictStatusException( + "A datafeed [" + datafeedIds.iterator().next() + "] already exists for job [" + jobId + "]" + ) + ); + } + }) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java index be2c8dd871a9b..bcdf5e83cc5ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactory.java @@ -59,13 +59,12 @@ static void create( ) { final boolean hasAggs = datafeed.hasAggregations(); final boolean isComposite = hasAggs && datafeed.hasCompositeAgg(xContentRegistry); - ActionListener factoryHandler = ActionListener.wrap( - factory -> listener.onResponse( + ActionListener factoryHandler = listener.delegateFailureAndWrap( + (l, factory) -> l.onResponse( datafeed.getChunkingConfig().isEnabled() ? new ChunkedDataExtractorFactory(datafeed, job, xContentRegistry, factory) : factory - ), - listener::onFailure + ) ); ActionListener getRollupIndexCapsActionHandler = ActionListener.wrap(response -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index e226056217351..fbabc9903c4cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -223,7 +223,7 @@ public void findDatafeedIdsForJobIds(Collection jobIds, ActionListenerwrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { Set datafeedIds = new HashSet<>(); // There cannot be more than one datafeed per job assert response.getHits().getTotalHits().value <= jobIds.size(); @@ -233,8 +233,8 @@ public void findDatafeedIdsForJobIds(Collection jobIds, ActionListenerwrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { Map datafeedsByJobId = new HashMap<>(); // There cannot be more than one datafeed per job assert response.getHits().getTotalHits().value <= jobIds.size(); @@ -265,8 +265,8 @@ public void findDatafeedsByJobIds( DatafeedConfig.Builder builder = parseLenientlyFromSource(hit.getSourceRef()); datafeedsByJobId.put(builder.getJobId(), builder); } - listener.onResponse(datafeedsByJobId); - }, listener::onFailure), + delegate.onResponse(datafeedsByJobId); + }), client::search ); } @@ -440,7 +440,7 @@ public void expandDatafeedIds( client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, - ActionListener.wrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { SortedSet datafeedIds = new TreeSet<>(); SearchHit[] hits = response.getHits().getHits(); for (SearchHit hit : hits) { @@ -453,12 +453,12 @@ public void expandDatafeedIds( requiredMatches.filterMatchedIds(datafeedIds); if (requiredMatches.hasUnmatchedIds()) { // some required datafeeds were not found - listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + delegate.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); return; } - listener.onResponse(datafeedIds); - }, listener::onFailure), + delegate.onResponse(datafeedIds); + }), client::search ); @@ -502,7 +502,7 @@ public void expandDatafeedConfigs( client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, - ActionListener.wrap(response -> { + listener.delegateFailureAndWrap((delegate, response) -> { List datafeeds = new ArrayList<>(); Set datafeedIds = new HashSet<>(); SearchHit[] hits = response.getHits().getHits(); @@ -521,12 +521,12 @@ public void expandDatafeedConfigs( requiredMatches.filterMatchedIds(datafeedIds); if (requiredMatches.hasUnmatchedIds()) { // some required datafeeds were not 
found - listener.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); + delegate.onFailure(ExceptionsHelper.missingDatafeedException(requiredMatches.unmatchedIdsString())); return; } - listener.onResponse(datafeeds); - }, listener::onFailure), + delegate.onResponse(datafeeds); + }), client::search ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 223154737df3f..d370e8af52549 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.dataframe.extractor.ExtractedFieldsDetector; import org.elasticsearch.xpack.ml.dataframe.extractor.ExtractedFieldsDetectorFactory; import org.elasticsearch.xpack.ml.dataframe.inference.InferenceRunner; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; @@ -171,9 +170,8 @@ public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, Time }, task::setFailed); // Retrieve configuration - ActionListener statsIndexListener = ActionListener.wrap( - aBoolean -> configProvider.get(task.getParams().getId(), configListener), - configListener::onFailure + ActionListener statsIndexListener = configListener.delegateFailureAndWrap( + (l, aBoolean) -> configProvider.get(task.getParams().getId(), l) ); // Make sure the stats index and alias exist @@ -203,25 +201,22 @@ private void createStatsIndexAndUpdateMappingsIfNecessary( TimeValue masterNodeTimeout, ActionListener listener ) { - ActionListener createIndexListener = ActionListener.wrap( - aBoolean -> ElasticsearchMappings.addDocMappingIfMissing( - MlStatsIndex.writeAlias(), - MlStatsIndex::wrappedMapping, - clientToUse, - clusterState, - masterNodeTimeout, - listener, - MlStatsIndex.STATS_INDEX_MAPPINGS_VERSION - ), - listener::onFailure - ); - MlStatsIndex.createStatsIndexAndAliasIfNecessary( clientToUse, clusterState, expressionResolver, masterNodeTimeout, - createIndexListener + listener.delegateFailureAndWrap( + (l, aBoolean) -> ElasticsearchMappings.addDocMappingIfMissing( + MlStatsIndex.writeAlias(), + MlStatsIndex::wrappedMapping, + clientToUse, + clusterState, + masterNodeTimeout, + l, + MlStatsIndex.STATS_INDEX_MAPPINGS_VERSION + ) + ) ); } @@ -306,25 +301,25 @@ private void executeJobInMiddleOfReindexing(DataFrameAnalyticsTask task, DataFra private void buildInferenceStep(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config, ActionListener listener) { ParentTaskAssigningClient parentTaskClient = new ParentTaskAssigningClient(client, task.getParentTaskId()); - - ActionListener extractedFieldsDetectorListener = ActionListener.wrap(extractedFieldsDetector -> { - ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); - InferenceRunner inferenceRunner = new InferenceRunner( - settings, - parentTaskClient, - modelLoadingService, - resultsPersisterService, - task.getParentTaskId(), - config, - extractedFields, - task.getStatsHolder().getProgressTracker(), - task.getStatsHolder().getDataCountsTracker() - ); - 
InferenceStep inferenceStep = new InferenceStep(client, task, auditor, config, threadPool, inferenceRunner); - listener.onResponse(inferenceStep); - }, listener::onFailure); - - new ExtractedFieldsDetectorFactory(parentTaskClient).createFromDest(config, extractedFieldsDetectorListener); + new ExtractedFieldsDetectorFactory(parentTaskClient).createFromDest( + config, + listener.delegateFailureAndWrap((delegate, extractedFieldsDetector) -> { + ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); + InferenceRunner inferenceRunner = new InferenceRunner( + settings, + parentTaskClient, + modelLoadingService, + resultsPersisterService, + task.getParentTaskId(), + config, + extractedFields, + task.getStatsHolder().getProgressTracker(), + task.getStatsHolder().getDataCountsTracker() + ); + InferenceStep inferenceStep = new InferenceStep(client, task, auditor, config, threadPool, inferenceRunner); + delegate.onResponse(inferenceStep); + }) + ); } public boolean isNodeShuttingDown() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java index 81de8add4ae2e..8623f456b2035 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java @@ -134,9 +134,11 @@ private static void prepareCreateIndexRequest( AtomicReference settingsHolder = new AtomicReference<>(); AtomicReference mappingsHolder = new AtomicReference<>(); - ActionListener fieldCapabilitiesListener = ActionListener.wrap(fieldCapabilitiesResponse -> { - listener.onResponse(createIndexRequest(clock, config, settingsHolder.get(), mappingsHolder.get(), fieldCapabilitiesResponse)); - }, listener::onFailure); + ActionListener fieldCapabilitiesListener = listener.delegateFailureAndWrap( + (l, fieldCapabilitiesResponse) -> l.onResponse( + createIndexRequest(clock, config, settingsHolder.get(), mappingsHolder.get(), fieldCapabilitiesResponse) + ) + ); ActionListener mappingsListener = ActionListener.wrap(mappings -> { mappingsHolder.set(mappings); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java index b9d7e31a2cf73..09c3ae15c90a3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorFactory.java @@ -147,22 +147,22 @@ public static void createForDestinationIndex( ActionListener listener ) { ExtractedFieldsDetectorFactory extractedFieldsDetectorFactory = new ExtractedFieldsDetectorFactory(client); - extractedFieldsDetectorFactory.createFromDest(config, ActionListener.wrap(extractedFieldsDetector -> { + extractedFieldsDetectorFactory.createFromDest(config, listener.delegateFailureAndWrap((delegate, extractedFieldsDetector) -> { ExtractedFields extractedFields = extractedFieldsDetector.detect().v1(); - - DataFrameDataExtractorFactory extractorFactory = new DataFrameDataExtractorFactory( - client, - config.getId(), - Collections.singletonList(config.getDest().getIndex()), - config.getSource().getParsedQuery(), - extractedFields, - config.getAnalysis().getRequiredFields(), - config.getHeaders(), - 
config.getAnalysis().supportsMissingValues(), - createTrainTestSplitterFactory(client, config, extractedFields), - Collections.emptyMap() + delegate.onResponse( + new DataFrameDataExtractorFactory( + client, + config.getId(), + Collections.singletonList(config.getDest().getIndex()), + config.getSource().getParsedQuery(), + extractedFields, + config.getAnalysis().getRequiredFields(), + config.getHeaders(), + config.getAnalysis().supportsMissingValues(), + createTrainTestSplitterFactory(client, config, extractedFields), + Collections.emptyMap() + ) ); - listener.onResponse(extractorFactory); - }, listener::onFailure)); + })); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java index 49e25c95713ef..73f8e7bd520d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/ExtractedFieldsDetectorFactory.java @@ -112,11 +112,6 @@ private void getCardinalitiesForFieldsWithConstraints( return; } - ActionListener searchListener = ActionListener.wrap( - searchResponse -> buildFieldCardinalitiesMap(config, searchResponse, listener), - listener::onFailure - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0) .query(config.getSource().getParsedQuery()) .runtimeMappings(config.getSource().getRuntimeMappings()); @@ -147,7 +142,7 @@ private void getCardinalitiesForFieldsWithConstraints( client, TransportSearchAction.TYPE, searchRequest, - searchListener + listener.delegateFailureAndWrap((l, searchResponse) -> buildFieldCardinalitiesMap(config, searchResponse, l)) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 5469c6a7a7d87..8c7d490f37787 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -103,19 +103,17 @@ public void put( TimeValue timeout, ActionListener listener ) { - - ActionListener deleteLeftOverDocsListener = ActionListener.wrap( - r -> index(prepareConfigForIndex(config, headers), null, listener), - listener::onFailure - ); - - ActionListener existsListener = ActionListener.wrap(exists -> { + ActionListener existsListener = listener.delegateFailureAndWrap((l, exists) -> { if (exists) { - listener.onFailure(ExceptionsHelper.dataFrameAnalyticsAlreadyExists(config.getId())); + l.onFailure(ExceptionsHelper.dataFrameAnalyticsAlreadyExists(config.getId())); } else { - deleteLeftOverDocs(config, timeout, deleteLeftOverDocsListener); + deleteLeftOverDocs( + config, + timeout, + l.delegateFailureAndWrap((ll, r) -> index(prepareConfigForIndex(config, headers), null, ll)) + ); } - }, listener::onFailure); + }); exists(config.getId(), existsListener); } @@ -194,10 +192,10 @@ public void update( DataFrameAnalyticsConfig updatedConfig = updatedConfigBuilder.build(); // Index the update config - index(updatedConfig, getResponse, ActionListener.wrap(indexedConfig -> { + index(updatedConfig, getResponse, 
listener.delegateFailureAndWrap((l, indexedConfig) -> { auditor.info(id, Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_UPDATED, update.getUpdatedFields())); - listener.onResponse(indexedConfig); - }, listener::onFailure)); + l.onResponse(indexedConfig); + })); }, listener::onFailure)); } @@ -269,20 +267,26 @@ private void index( public void get(String id, ActionListener listener) { GetDataFrameAnalyticsAction.Request request = new GetDataFrameAnalyticsAction.Request(); request.setResourceId(id); - executeAsyncWithOrigin(client, ML_ORIGIN, GetDataFrameAnalyticsAction.INSTANCE, request, ActionListener.wrap(response -> { - List analytics = response.getResources().results(); - if (analytics.size() != 1) { - listener.onFailure( - ExceptionsHelper.badRequestException( - "Expected a single match for data frame analytics [{}] " + "but got [{}]", - id, - analytics.size() - ) - ); - } else { - listener.onResponse(analytics.get(0)); - } - }, listener::onFailure)); + executeAsyncWithOrigin( + client, + ML_ORIGIN, + GetDataFrameAnalyticsAction.INSTANCE, + request, + listener.delegateFailureAndWrap((delegate, response) -> { + List analytics = response.getResources().results(); + if (analytics.size() != 1) { + delegate.onFailure( + ExceptionsHelper.badRequestException( + "Expected a single match for data frame analytics [{}] " + "but got [{}]", + id, + analytics.size() + ) + ); + } else { + delegate.onResponse(analytics.get(0)); + } + }) + ); } /** @@ -298,7 +302,7 @@ public void getMultiple(String ids, boolean allowNoMatch, ActionListener listener.onResponse(response.getResources().results()), listener::onFailure) + listener.delegateFailureAndWrap((l, response) -> l.onResponse(response.getResources().results())) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java index 843d9d74a1c7d..2a8b23728fbdb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsDeleter.java @@ -126,14 +126,13 @@ private void deleteConfig(String id, ActionListener listen } private void deleteState(DataFrameAnalyticsConfig config, TimeValue timeout, ActionListener listener) { - ActionListener deleteModelStateListener = ActionListener.wrap( - r -> executeDeleteByQuery( + ActionListener deleteModelStateListener = listener.delegateFailureAndWrap( + (l, r) -> executeDeleteByQuery( AnomalyDetectorsIndex.jobStateIndexPattern(), QueryBuilders.idsQuery().addIds(StoredProgress.documentId(config.getId())), timeout, - listener - ), - listener::onFailure + l + ) ); deleteModelState(config, timeout, 1, deleteModelStateListener); @@ -146,13 +145,18 @@ private void deleteModelState(DataFrameAnalyticsConfig config, TimeValue timeout } IdsQueryBuilder query = QueryBuilders.idsQuery().addIds(config.getAnalysis().getStateDocIdPrefix(config.getId()) + docNum); - executeDeleteByQuery(AnomalyDetectorsIndex.jobStateIndexPattern(), query, timeout, ActionListener.wrap(response -> { - if (response.getDeleted() > 0) { - deleteModelState(config, timeout, docNum + 1, listener); - return; - } - listener.onResponse(true); - }, listener::onFailure)); + executeDeleteByQuery( + AnomalyDetectorsIndex.jobStateIndexPattern(), + query, + timeout, + listener.delegateFailureAndWrap((l, response) -> { + if 
(response.getDeleted() > 0) { + deleteModelState(config, timeout, docNum + 1, l); + return; + } + l.onResponse(true); + }) + ); } private void deleteStats(String jobId, TimeValue timeout, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java index 0c693ff2d34f4..112d164601546 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java @@ -67,11 +67,11 @@ public final void execute(ActionListener listener) { listener.onResponse(new StepResponse(true)); return; } - doExecute(ActionListener.wrap(stepResponse -> { + doExecute(listener.delegateFailureAndWrap((l, stepResponse) -> { // We persist progress at the end of each step to ensure we do not have // to repeat the step in case the node goes down without getting a chance to persist progress. - task.persistProgress(() -> listener.onResponse(stepResponse)); - }, listener::onFailure)); + task.persistProgress(() -> l.onResponse(stepResponse)); + })); } protected abstract void doExecute(ActionListener listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java index 9e56387ed773e..ec914546c7de5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java @@ -58,17 +58,16 @@ protected void doExecute(ActionListener listener) { final ParentTaskAssigningClient parentTaskClient = parentTaskClient(); // Update state to ANALYZING and start process - ActionListener dataExtractorFactoryListener = ActionListener.wrap( - dataExtractorFactory -> processManager.runJob(task, config, dataExtractorFactory, listener), - listener::onFailure + ActionListener dataExtractorFactoryListener = listener.delegateFailureAndWrap( + (l, dataExtractorFactory) -> processManager.runJob(task, config, dataExtractorFactory, l) ); - ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { + ActionListener refreshListener = dataExtractorFactoryListener.delegateFailureAndWrap((l, refreshResponse) -> { // TODO This could fail with errors. In that case we get stuck with the copied index. // We could delete the index in case of failure or we could try building the factory before reindexing // to catch the error early on. - DataFrameDataExtractorFactory.createForDestinationIndex(parentTaskClient, config, dataExtractorFactoryListener); - }, dataExtractorFactoryListener::onFailure); + DataFrameDataExtractorFactory.createForDestinationIndex(parentTaskClient, config, l); + }); // First we need to refresh the dest index to ensure data is searchable in case the job // was stopped after reindexing was complete but before the index was refreshed. 
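A note on the recurring pattern in the ML hunks above and below: they replace the ActionListener.wrap(onResponse, listener::onFailure) idiom with listener.delegateFailureAndWrap(...), which forwards failures to the outer listener instead of wiring them up by hand. The following is a minimal illustrative sketch of the two equivalent forms; the fetchConfig method and the String/Integer types are hypothetical and not taken from this change set.

import org.elasticsearch.action.ActionListener;

class DelegateFailureSketch {

    // Hypothetical async call, used only to illustrate the listener chaining.
    void fetchConfig(String id, ActionListener<String> configListener) {
        configListener.onResponse("config-for-" + id);
    }

    // Before: ActionListener.wrap builds a new listener and forwards failures explicitly.
    void before(String id, ActionListener<Integer> listener) {
        ActionListener<String> wrapped = ActionListener.wrap(
            config -> listener.onResponse(config.length()),
            listener::onFailure
        );
        fetchConfig(id, wrapped);
    }

    // After: delegateFailureAndWrap hands the delegate (l) plus the response to the lambda,
    // and failures from fetchConfig are routed to the outer listener without extra plumbing.
    void after(String id, ActionListener<Integer> listener) {
        fetchConfig(id, listener.delegateFailureAndWrap((l, config) -> l.onResponse(config.length())));
    }
}

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java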
index dbf1f3e7be3d9..258c66ad5cb0f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java @@ -59,18 +59,13 @@ public Name name() { @Override protected void doExecute(ActionListener listener) { - - ActionListener refreshListener = ActionListener.wrap( - refreshResponse -> listener.onResponse(new StepResponse(false)), - listener::onFailure - ); - - ActionListener dataCountsIndexedListener = ActionListener.wrap( - indexResponse -> refreshIndices(refreshListener), - listener::onFailure + indexDataCounts( + listener.delegateFailureAndWrap( + (l, indexResponse) -> refreshIndices( + l.delegateFailureAndWrap((ll, refreshResponse) -> ll.onResponse(new StepResponse(false))) + ) + ) ); - - indexDataCounts(dataCountsIndexedListener); } private void indexDataCounts(ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java index ad005e6d9ae6c..37ad1a5cb8f56 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -70,27 +69,21 @@ protected void doExecute(ActionListener listener) { return; } - ActionListener modelIdListener = ActionListener.wrap(modelId -> runInference(modelId, listener), listener::onFailure); - - ActionListener testDocsExistListener = ActionListener.wrap(testDocsExist -> { - if (testDocsExist) { - getModelId(modelIdListener); - } else { - // no need to run inference at all so let us skip - // loading the model in memory. - LOGGER.debug(() -> "[" + config.getId() + "] Inference step completed immediately as there are no test docs"); - task.getStatsHolder().getProgressTracker().updateInferenceProgress(100); - listener.onResponse(new StepResponse(isTaskStopping())); - return; - } - }, listener::onFailure); - - ActionListener refreshDestListener = ActionListener.wrap( - refreshResponse -> searchIfTestDocsExist(testDocsExistListener), - listener::onFailure + refreshDestAsync( + listener.delegateFailureAndWrap( + (delegate, refreshResponse) -> searchIfTestDocsExist(delegate.delegateFailureAndWrap((delegate2, testDocsExist) -> { + if (testDocsExist) { + getModelId(delegate2.delegateFailureAndWrap((l, modelId) -> runInference(modelId, l))); + } else { + // no need to run inference at all so let us skip + // loading the model in memory.
+ LOGGER.debug(() -> "[" + config.getId() + "] Inference step completed immediately as there are no test docs"); + task.getStatsHolder().getProgressTracker().updateInferenceProgress(100); + delegate2.onResponse(new StepResponse(isTaskStopping())); + } + })) + ) ); - - refreshDestAsync(refreshDestListener); } private void runInference(String modelId, ActionListener listener) { @@ -124,10 +117,7 @@ private void searchIfTestDocsExist(ActionListener listener) { ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, - ActionListener.wrap( - searchResponse -> listener.onResponse(searchResponse.getHits().getTotalHits().value > 0), - listener::onFailure - ) + listener.delegateFailureAndWrap((l, searchResponse) -> l.onResponse(searchResponse.getHits().getTotalHits().value > 0)) ); } @@ -142,14 +132,20 @@ private void getModelId(ActionListener listener) { SearchRequest searchRequest = new SearchRequest(InferenceIndexConstants.INDEX_PATTERN); searchRequest.source(searchSourceBuilder); - executeAsyncWithOrigin(client, ML_ORIGIN, TransportSearchAction.TYPE, searchRequest, ActionListener.wrap(searchResponse -> { - SearchHit[] hits = searchResponse.getHits().getHits(); - if (hits.length == 0) { - listener.onFailure(new ResourceNotFoundException("No model could be found to perform inference")); - } else { - listener.onResponse(hits[0].getId()); - } - }, listener::onFailure)); + executeAsyncWithOrigin( + client, + ML_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + listener.delegateFailureAndWrap((l, searchResponse) -> { + SearchHit[] hits = searchResponse.getHits().getHits(); + if (hits.length == 0) { + l.onFailure(new ResourceNotFoundException("No model could be found to perform inference")); + } else { + l.onResponse(hits[0].getId()); + } + }) + ); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 7532ae4317830..9887152c6f311 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -142,14 +142,8 @@ public void jobExists(String jobId, @Nullable TaskId parentTaskId, ActionListene * a ResourceNotFoundException is returned */ public void getJob(String jobId, ActionListener jobListener) { - jobConfigProvider.getJob( - jobId, - null, - ActionListener.wrap( - r -> jobListener.onResponse(r.build()), // TODO JIndex we shouldn't be building the job here - jobListener::onFailure - ) - ); + // TODO JIndex we shouldn't be building the job here + jobConfigProvider.getJob(jobId, null, jobListener.delegateFailureAndWrap((l, r) -> l.onResponse(r.build()))); } /** @@ -183,15 +177,14 @@ public void expandJobs(String expression, boolean allowNoMatch, ActionListener jobsListener.onResponse( + jobsListener.delegateFailureAndWrap( + (l, jobBuilders) -> l.onResponse( new QueryPage<>( jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList()), jobBuilders.size(), Job.RESULTS_FIELD ) - ), - jobsListener::onFailure + ) ) ); } @@ -253,10 +246,10 @@ public void putJob( @Override public void onResponse(Boolean mappingsUpdated) { - jobConfigProvider.putJob(job, ActionListener.wrap(response -> { + jobConfigProvider.putJob(job, actionListener.delegateFailureAndWrap((l, response) -> { auditor.info(job.getId(), Messages.getMessage(Messages.JOB_AUDIT_CREATED)); - actionListener.onResponse(new PutJobAction.Response(job)); - }, actionListener::onFailure)); + 
l.onResponse(new PutJobAction.Response(job)); + })); } @Override @@ -275,17 +268,16 @@ public void onFailure(Exception e) { } }; - ActionListener addDocMappingsListener = ActionListener.wrap( - indicesCreated -> ElasticsearchMappings.addDocMappingIfMissing( + ActionListener addDocMappingsListener = putJobListener.delegateFailureAndWrap( + (l, indicesCreated) -> ElasticsearchMappings.addDocMappingIfMissing( MlConfigIndex.indexName(), MlConfigIndex::mapping, client, state, request.masterNodeTimeout(), - putJobListener, + l, MlConfigIndex.CONFIG_INDEX_MAPPINGS_VERSION - ), - putJobListener::onFailure + ) ); ActionListener> checkForLeftOverDocs = ActionListener.wrap(matchedIds -> { @@ -634,14 +626,15 @@ public void updateProcessOnCalendarChanged(List calendarJobIds, ActionLi // calendarJobIds may be a group or job jobConfigProvider.expandGroupIds( calendarJobIds, - ActionListener.wrap(expandedIds -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - // Merge the expanded group members with the request Ids. - // Ids that aren't jobs will be filtered by isJobOpen() - expandedIds.addAll(calendarJobIds); - - openJobIds.retainAll(expandedIds); - submitJobEventUpdate(openJobIds, updateListener); - }), updateListener::onFailure) + updateListener.delegateFailureAndWrap( + (delegate, expandedIds) -> threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + // Merge the expanded group members with the request Ids. + // Ids that aren't jobs will be filtered by isJobOpen() + expandedIds.addAll(calendarJobIds); + openJobIds.retainAll(expandedIds); + submitJobEventUpdate(openJobIds, delegate); + }) + ) ); } @@ -678,12 +671,13 @@ public void revertSnapshot( jobResultsPersister.persistQuantiles( modelSnapshot.getQuantiles(), WriteRequest.RefreshPolicy.IMMEDIATE, - ActionListener.wrap(quantilesResponse -> { - // The quantiles can be large, and totally dominate the output - - // it's clearer to remove them as they are not necessary for the revert op - ModelSnapshot snapshotWithoutQuantiles = new ModelSnapshot.Builder(modelSnapshot).setQuantiles(null).build(); - actionListener.onResponse(new RevertModelSnapshotAction.Response(snapshotWithoutQuantiles)); - }, actionListener::onFailure) + // The quantiles can be large, and totally dominate the output - + // it's clearer to remove them as they are not necessary for the revert op + actionListener.delegateFailureAndWrap( + (l, quantilesResponse) -> l.onResponse( + new RevertModelSnapshotAction.Response(new ModelSnapshot.Builder(modelSnapshot).setQuantiles(null).build()) + ) + ) ); }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index aa82c7a261b96..bd1e47e3cb160 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -70,19 +70,19 @@ private void removeData( return; } - calcCutoffEpochMs(job.getId(), retentionDays, ActionListener.wrap(response -> { + calcCutoffEpochMs(job.getId(), retentionDays, listener.delegateFailureAndWrap((delegate, response) -> { if (response == null) { - removeData(jobIterator, requestsPerSecond, listener, isTimedOutSupplier); + removeData(jobIterator, requestsPerSecond, delegate, isTimedOutSupplier); } else { removeDataBefore( job, 
requestsPerSecond, response.latestTimeMs, response.cutoffEpochMs, - ActionListener.wrap(r -> removeData(jobIterator, requestsPerSecond, listener, isTimedOutSupplier), listener::onFailure) + delegate.delegateFailureAndWrap((l, r) -> removeData(jobIterator, requestsPerSecond, l, isTimedOutSupplier)) ); } - }, listener::onFailure)); + })); } abstract void calcCutoffEpochMs(String jobId, long retentionDays, ActionListener listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java index 0a5612f8e0ccc..1c8c100939dc7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemover.java @@ -42,20 +42,20 @@ public void remove(float requestsPerSec, ActionListener listener, Boole listener.onResponse(false); return; } - getEmptyStateIndices(ActionListener.wrap(emptyStateIndices -> { + getEmptyStateIndices(listener.delegateFailureAndWrap((delegate, emptyStateIndices) -> { if (emptyStateIndices.isEmpty()) { - listener.onResponse(true); + delegate.onResponse(true); return; } - getCurrentStateIndices(ActionListener.wrap(currentStateIndices -> { + getCurrentStateIndices(delegate.delegateFailureAndWrap((l, currentStateIndices) -> { Set stateIndicesToRemove = Sets.difference(emptyStateIndices, currentStateIndices); if (stateIndicesToRemove.isEmpty()) { - listener.onResponse(true); + l.onResponse(true); return; } - executeDeleteEmptyStateIndices(stateIndicesToRemove, listener); - }, listener::onFailure)); - }, listener::onFailure)); + executeDeleteEmptyStateIndices(stateIndicesToRemove, l); + })); + })); } catch (Exception e) { listener.onFailure(e); } @@ -64,15 +64,21 @@ public void remove(float requestsPerSec, ActionListener listener, Boole private void getEmptyStateIndices(ActionListener> listener) { IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(AnomalyDetectorsIndex.jobStateIndexPattern()); indicesStatsRequest.setParentTask(parentTaskId); - client.admin().indices().stats(indicesStatsRequest, ActionListener.wrap(indicesStatsResponse -> { - Set emptyStateIndices = indicesStatsResponse.getIndices() - .values() - .stream() - .filter(stats -> stats.getTotal().getDocs().getCount() == 0) - .map(IndexStats::getIndex) - .collect(toSet()); - listener.onResponse(emptyStateIndices); - }, listener::onFailure)); + client.admin() + .indices() + .stats( + indicesStatsRequest, + listener.delegateFailureAndWrap( + (l, indicesStatsResponse) -> l.onResponse( + indicesStatsResponse.getIndices() + .values() + .stream() + .filter(stats -> stats.getTotal().getDocs().getCount() == 0) + .map(IndexStats::getIndex) + .collect(toSet()) + ) + ) + ); } private void getCurrentStateIndices(ActionListener> listener) { @@ -82,7 +88,7 @@ private void getCurrentStateIndices(ActionListener> listener) { .indices() .getIndex( getIndexRequest, - ActionListener.wrap(getIndexResponse -> listener.onResponse(Set.of(getIndexResponse.getIndices())), listener::onFailure) + listener.delegateFailureAndWrap((l, getIndexResponse) -> l.onResponse(Set.of(getIndexResponse.getIndices()))) ); } @@ -93,7 +99,7 @@ private void executeDeleteEmptyStateIndices(Set emptyStateIndices, Actio .indices() .delete( deleteIndexRequest, - ActionListener.wrap(deleteResponse -> listener.onResponse(deleteResponse.isAcknowledged()), 
listener::onFailure) + listener.delegateFailureAndWrap((l, deleteResponse) -> l.onResponse(deleteResponse.isAcknowledged())) ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 507e9dac6282d..27bd3c926d944 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -249,7 +249,7 @@ private void deleteModelSnapshots(List modelSnapshots, String job return; } JobDataDeleter deleter = new JobDataDeleter(client, jobId); - deleter.deleteModelSnapshots(modelSnapshots, ActionListener.wrap(bulkResponse -> { + deleter.deleteModelSnapshots(modelSnapshots, listener.delegateFailureAndWrap((l, bulkResponse) -> { auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_SNAPSHOTS_DELETED, modelSnapshots.size())); LOGGER.debug( () -> format( @@ -259,8 +259,8 @@ private void deleteModelSnapshots(List modelSnapshots, String job modelSnapshots.stream().map(ModelSnapshot::getDescription).collect(toList()) ) ); - listener.onResponse(true); - }, listener::onFailure)); + l.onResponse(true); + })); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 654ce87fc5e30..35e16b9fa8b88 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -195,11 +195,11 @@ static void latestBucketTime(OriginSettingClient client, TaskId parentTaskId, St searchRequest.indicesOptions(MlIndicesUtils.addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS)); searchRequest.setParentTask(parentTaskId); - client.search(searchRequest, ActionListener.wrap(response -> { + client.search(searchRequest, listener.delegateFailureAndWrap((delegate, response) -> { SearchHit[] hits = response.getHits().getHits(); if (hits.length == 0) { // no buckets found - listener.onResponse(null); + delegate.onResponse(null); } else { try ( @@ -210,12 +210,12 @@ static void latestBucketTime(OriginSettingClient client, TaskId parentTaskId, St ) ) { Bucket bucket = Bucket.LENIENT_PARSER.apply(parser, null); - listener.onResponse(bucket.getTimestamp().getTime()); + delegate.onResponse(bucket.getTimestamp().getTime()); } catch (IOException e) { - listener.onFailure(new ElasticsearchParseException("failed to parse bucket", e)); + delegate.onFailure(new ElasticsearchParseException("failed to parse bucket", e)); } } - }, listener::onFailure)); + })); } private void auditResultsWereDeleted(String jobId, long cutoffEpochMs) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 09cd6225cf0ca..c50e744bde96b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -398,18 +398,18 @@ private void stopAssociatedDatafeedForFailedJob(String jobId) { } 
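For context on the recurring change in the ML hunks above: `ActionListener.wrap(onResponse, listener::onFailure)` is being replaced with `listener.delegateFailureAndWrap(...)`, which forwards failures to the original listener automatically and hands the success path a delegate to complete. A minimal sketch of the two equivalent shapes, where `fetchCount` and `render` are hypothetical stand-ins for the real async call and transformation, not code from this PR:

import org.elasticsearch.action.ActionListener;

class DelegatingListenerSketch {
    // Hypothetical async call and transformation, standing in for the real ML code.
    void fetchCount(ActionListener<Long> l) {
        l.onResponse(42L);
    }

    String render(long count) {
        return "count=" + count;
    }

    // Before: the failure path has to be forwarded by hand on every wrapped listener.
    void before(ActionListener<String> listener) {
        fetchCount(ActionListener.wrap(count -> listener.onResponse(render(count)), listener::onFailure));
    }

    // After: delegateFailureAndWrap forwards onFailure to `listener` automatically,
    // so the lambda only has to complete the delegate on success.
    void after(ActionListener<String> listener) {
        fetchCount(listener.delegateFailureAndWrap((delegate, count) -> delegate.onResponse(render(count))));
    }
}

The delegating form also stops the lambda from capturing the outer listener directly, which is why the hunks above consistently rename the lambda parameter to `delegate` or `l`.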
private void getRunningDatafeed(String jobId, ActionListener listener) { - ActionListener> datafeedListener = ActionListener.wrap(datafeeds -> { + ActionListener> datafeedListener = listener.delegateFailureAndWrap((delegate, datafeeds) -> { assert datafeeds.size() <= 1; if (datafeeds.isEmpty()) { - listener.onResponse(null); + delegate.onResponse(null); return; } String datafeedId = datafeeds.iterator().next(); PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); - listener.onResponse(datafeedTask != null ? datafeedId : null); - }, listener::onFailure); + delegate.onResponse(datafeedTask != null ? datafeedId : null); + }); datafeedConfigProvider.findDatafeedIdsForJobIds(Collections.singleton(jobId), datafeedListener); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java index b560a758b8e83..a452c156e77f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/EmptyStateIndexRemoverTests.java @@ -27,6 +27,7 @@ import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.InOrder; +import org.mockito.Mockito; import org.mockito.stubbing.Answer; import java.util.Map; @@ -57,6 +58,7 @@ public void setUpTests() { client = mock(Client.class); OriginSettingClient originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); deleteIndexRequestCaptor = ArgumentCaptor.forClass(DeleteIndexRequest.class); remover = new EmptyStateIndexRemover(originSettingClient, new TaskId("test", 0L)); @@ -66,6 +68,7 @@ public void setUpTests() { public void verifyNoOtherInteractionsWithMocks() { verify(client).settings(); verify(client, atLeastOnce()).threadPool(); + verify(listener, Mockito.atLeast(0)).delegateFailureAndWrap(any()); verifyNoMoreInteractions(client, listener); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java index ad0719011c92e..39f1ead7e24e0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredAnnotationsRemoverTests.java @@ -60,6 +60,7 @@ public void setUpTests() { client = mock(Client.class); originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); } public void testRemove_GivenNoJobs() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java index 5aa5b847b26be..4dbb4eda07b0a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemoverTests.java @@ -60,6 +60,7 @@ public void setUpTests() { client = mock(Client.class); originSettingClient = MockOriginSettingClient.mockOriginSettingClient(client, ClientHelper.ML_ORIGIN); listener = mock(ActionListener.class); + when(listener.delegateFailureAndWrap(any())).thenCallRealMethod(); } public void testRemove_GivenNoJobs() { diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java index df6fded49e6bb..25b4b685ac50f 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java @@ -109,7 +109,8 @@ private static FieldInfos filterFields(FieldInfos fieldInfos) { 0, fieldInfo.getVectorEncoding(), fieldInfo.getVectorSimilarityFunction(), - fieldInfo.isSoftDeletesField() + fieldInfo.isSoftDeletesField(), + fieldInfo.isParentField() ) ); } diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java index 9cef274aa753e..83fcb17449100 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene50/Lucene50FieldInfosFormat.java @@ -111,6 +111,7 @@ public FieldInfos read(Directory directory, SegmentInfo segmentInfo, String segm 0, VectorEncoding.FLOAT32, VectorSimilarityFunction.EUCLIDEAN, + false, false ); infos[i].checkConsistency(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 18b4e6ed7cb31..4b9e1b0d9211e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -102,7 +102,10 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { // we can bypass this by forcing soft deletes to be used. TODO this restriction can be lifted when #55142 is resolved. final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")); + // INDEX_CHECK_ON_STARTUP requires expensive processing to verify the integrity of many important files during + // a shard recovery or relocation. As a result, cleaning up the files takes a long time and the assertShardFolder + // check may not complete within 30s.
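The unit-test hunks above (EmptyStateIndexRemoverTests, ExpiredAnnotationsRemoverTests, ExpiredResultsRemoverTests) add `when(listener.delegateFailureAndWrap(any())).thenCallRealMethod()` because the listener is a Mockito mock and `delegateFailureAndWrap` is a default method on the `ActionListener` interface: left unstubbed, the mock would return `null` and the refactored code under test would fail with a `NullPointerException`. A minimal sketch of that setup, with an illustrative generic type:

import org.elasticsearch.action.ActionListener;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class MockedListenerSketch {
    @SuppressWarnings("unchecked")
    ActionListener<Boolean> mockedListener() {
        ActionListener<Boolean> listener = mock(ActionListener.class);
        // Run the real default-method implementation so the code under test gets a
        // working delegate back, while onResponse/onFailure still land on the mock
        // and remain verifiable.
        when(listener.delegateFailureAndWrap(any())).thenCallRealMethod();
        return listener;
    }
}

The matching `verify(listener, Mockito.atLeast(0)).delegateFailureAndWrap(any())` added in EmptyStateIndexRemoverTests keeps this stubbed call from tripping `verifyNoMoreInteractions`.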
+ originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false"); } assertAcked(prepareCreate(indexName, originalIndexSettings)); assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 38222f64b282b..ddd9f40b5404c 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -111,7 +111,7 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { // we can bypass this by forcing soft deletes to be used. TODO this restriction can be lifted when #55142 is resolved. final Settings.Builder originalIndexSettings = Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), true); if (randomBoolean()) { - originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")); + originalIndexSettings.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false"); } assertAcked(prepareCreate(indexName, originalIndexSettings)); assertAcked(indicesAdmin().prepareAliases().addAlias(indexName, aliasName)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java index 573edc6e517bf..e6d7a66a2bdb3 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/TransportSLMGetExpiredSnapshotsActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.Settings; @@ -286,7 +287,7 @@ private static Repository createMockRepository(ThreadPool threadPool, List consumer = invocation.getArgument(3); final ActionListener listener = invocation.getArgument(4); - final Set snapshotIds = new HashSet<>(snapshotIdCollection); - for (SnapshotInfo snapshotInfo : snapshotInfos) { - if (snapshotIds.remove(snapshotInfo.snapshotId())) { - threadPool.generic().execute(() -> { - try { - consumer.accept(snapshotInfo); - } catch (Exception e) { - fail(e); - } - }); + try (var refs = new RefCountingRunnable(() -> listener.onResponse(null))) { + final Set snapshotIds = new HashSet<>(snapshotIdCollection); + for (SnapshotInfo snapshotInfo : snapshotInfos) { + if (snapshotIds.remove(snapshotInfo.snapshotId())) { + threadPool.generic().execute(ActionRunnable.run(refs.acquireListener(), () -> { + try { + consumer.accept(snapshotInfo); + } catch (Exception e) { + fail(e); + } + })); + } } } - listener.onResponse(null); return null; }).when(repository).getSnapshotInfo(any(), anyBoolean(), any(), any(), any()); diff --git 
a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java index b8af2ae44623a..ec20cc3c64104 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/FetchSizeTestCase.java @@ -89,6 +89,7 @@ public void testScroll() throws SQLException { * Test for {@code SELECT} that is implemented as a scroll query. * In this test we don't retrieve all records and rely on close() to clean the cursor */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testIncompleteScroll() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(4); @@ -152,6 +153,7 @@ public void testScrollWithDatetimeAndTimezoneParam() throws IOException, SQLExce /** * Test for {@code SELECT} that is implemented as an aggregation. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testAggregation() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(4); @@ -170,6 +172,7 @@ public void testAggregation() throws SQLException { /** * Test for nested documents. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testNestedDocuments() throws SQLException { try (Connection c = esJdbc(); Statement s = c.createStatement()) { s.setFetchSize(5); diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java index e962f35be2a94..bd49ef0f6b39d 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcErrorsTestCase.java @@ -78,6 +78,7 @@ public void testSelectProjectScoreInAggContext() throws IOException, SQLExceptio } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testSelectOrderByScoreInAggContext() throws IOException, SQLException { index("test", body -> body.field("foo", 1)); try (Connection c = esJdbc()) { @@ -111,6 +112,7 @@ public void testSelectScoreSubField() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testHardLimitForSortOnAggregate() throws IOException, SQLException { index("test", body -> body.field("a", 1).field("b", 2)); try (Connection c = esJdbc()) { diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java index b2b983803260c..6575ff780ccb8 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/PreparedStatementTestCase.java @@ -301,6 +301,7 @@ public void testWildcardField() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testConstantKeywordField() throws IOException, SQLException { String mapping = """ 
"properties":{"id":{"type":"integer"},"text":{"type":"constant_keyword"}}"""; @@ -368,6 +369,7 @@ public void testTooMayParameters() throws IOException, SQLException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testStringEscaping() throws SQLException { try (Connection connection = esJdbc()) { try (PreparedStatement statement = connection.prepareStatement("SELECT ?, ?, ?, ?")) { diff --git a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index d99fb9674818c..d8534b963c2d7 100644 --- a/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/jdbc/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -73,7 +73,6 @@ import static org.elasticsearch.common.time.DateUtils.toMilliSeconds; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_DRIVER_VERSION; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; -import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.UNSIGNED_LONG_MAX; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.UNSIGNED_LONG_TYPE_NAME; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asDate; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asTime; @@ -846,6 +845,7 @@ public void testGettingValidNumbersWithCastingFromUnsignedLong() throws IOExcept } // Double values testing + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingValidDoubleWithoutCasting() throws IOException, SQLException { List doubleTestValues = createTestDataForNumericValueTests(ESTestCase::randomDouble); double random1 = doubleTestValues.get(0); @@ -1158,6 +1158,7 @@ public void testGettingValidBigDecimalFromFloatWithoutCasting() throws IOExcepti ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingValidBigDecimalFromDoubleWithoutCasting() throws IOException, SQLException { List doubleTestValues = createTestDataForNumericValueTests(ESTestCase::randomDouble); doWithQuery( @@ -1405,6 +1406,7 @@ public void testGettingDateWithoutCalendarWithNanos() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingDateWithCalendar() throws Exception { long randomLongDate = randomMillisUpToYear9999(); setupDataForDateTimeTests(randomLongDate); @@ -1434,6 +1436,7 @@ public void testGettingDateWithCalendar() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingDateWithCalendarWithNanos() throws Exception { assumeTrue( "Driver version [" + JDBC_DRIVER_VERSION + "] doesn't support DATETIME with nanosecond resolution]", @@ -1597,6 +1600,7 @@ public void testGettingTimestampWithoutCalendar() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testGettingTimestampWithoutCalendarWithNanos() throws Exception { assumeTrue( "Driver version [" + JDBC_DRIVER_VERSION + "] doesn't support DATETIME with nanosecond resolution]", @@ -1929,6 +1933,7 @@ public void testGetTimeType() throws IOException, SQLException { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testValidGetObjectCalls() throws 
IOException, SQLException { createIndexWithMapping("test"); updateMappingForNumericValuesTests("test"); diff --git a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java index 0e0c2bc8d78b4..6a46346f627ac 100644 --- a/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java +++ b/x-pack/plugin/sql/qa/server/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/JdbcSecurityIT.java @@ -345,6 +345,7 @@ public void testMetadataGetColumnsSingleFieldExcepted() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/105840") public void testMetadataGetColumnsDocumentExcluded() throws Exception { createUser("no_3s", "read_test_without_c_3"); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml index 81d87435ad39e..c2e728535a408 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/120_profile.yml @@ -121,11 +121,10 @@ setup: --- avg 8.14 or after: - skip: - features: ["node_selector"] + version: " - 8.13.99" + reason: "avg changed starting 8.14" - do: - node_selector: - version: "8.13.99 - " esql.query: body: query: 'FROM test | STATS AVG(data) | LIMIT 1'
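One change above is easy to miss in the flattened hunk: in TransportSLMGetExpiredSnapshotsActionTests the mocked repository used to call `listener.onResponse(null)` immediately after forking the per-snapshot consumers, and now wraps the loop in a `RefCountingRunnable` so the response is only sent once every forked task has completed. A minimal sketch of that fan-out shape, where `forEachThenRespond`, `items`, and `handle` are hypothetical names rather than code from this PR:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.RefCountingRunnable;

import java.util.List;
import java.util.concurrent.Executor;

class FanOutSketch {
    void forEachThenRespond(List<String> items, Executor executor, ActionListener<Void> listener) {
        // The completion callback runs only after the initial reference (released by
        // try-with-resources) and every acquired listener have been released.
        try (var refs = new RefCountingRunnable(() -> listener.onResponse(null))) {
            for (String item : items) {
                executor.execute(ActionRunnable.run(refs.acquireListener(), () -> handle(item)));
            }
        }
    }

    void handle(String item) {
        // stand-in for the per-item work done by the mocked repository's consumer
    }
}

This removes the window in which the test's listener could complete before the generic-pool tasks had run, which is presumably what the dispatch-then-respond ordering in the old mock allowed.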