diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 6ed98217d34fe..110982e31e661 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -539,9 +539,9 @@ class BuildPlugin implements Plugin<Project> { from generatePOMTask.destination into "${project.buildDir}/distributions" rename { - generatePOMTask.ext.pomFileName == null ? - "${project.archivesBaseName}-${project.version}.pom" : - generatePOMTask.ext.pomFileName + generatePOMTask.ext.pomFileName == null ? + "${project.archivesBaseName}-${project.version}.pom" : + generatePOMTask.ext.pomFileName } } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 34c266913d0a2..386457146685f 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.5.0-snapshot-13b9e28f9d +lucene = 8.0.0-snapshot-4d78db26be # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 739a590ba5f64..063fce9bcac5e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -1034,7 +1034,7 @@ public void testExplain() throws IOException { assertTrue(explainResponse.isExists()); assertTrue(explainResponse.isMatch()); assertTrue(explainResponse.hasExplanation()); - assertThat(explainResponse.getExplanation().getValue(), greaterThan(0.0f)); + assertThat(explainResponse.getExplanation().getValue().floatValue(), greaterThan(0.0f)); assertNull(explainResponse.getGetResult()); } { diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 3c54afb92c7b7..dd19594d29b87 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -21,7 +21,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.bouncycastle.bcpg.ArmoredInputStream; import org.bouncycastle.jce.provider.BouncyCastleProvider; @@ -355,7 +355,7 @@ boolean urlExists(Terminal terminal, String urlString) throws IOException { /** Returns all the official plugin names that look similar to pluginId.
**/ private List<String> checkMisspelledPlugin(String pluginId) { - LevensteinDistance ld = new LevensteinDistance(); + LevenshteinDistance ld = new LevenshteinDistance(); List<Tuple<Float, String>> scoredKeys = new ArrayList<>(); for (String officialPlugin : OFFICIAL_PLUGINS) { float distance = ld.getDistance(pluginId, officialPlugin); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 6e127a6ccfc69..f0303323d855f 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 7.5.0 -:lucene_version_path: 7_5_0 +:lucene_version: 8.0.0 +:lucene_version_path: 8_0_0 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index a75c21fdac658..9d9df4827fd4e 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -38,7 +38,6 @@ PUT phonetic_sample "my_analyzer": { "tokenizer": "standard", "filter": [ - "standard", "lowercase", "my_metaphone" ] diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 5eeb3a4605a40..958f48d835cd9 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -320,7 +320,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of "by_nested": { "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.3616575, "hits": [ { "_index": "sales", @@ -330,7 +330,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of "field": "comments", <1> "offset": 0 <2> }, - "_score": 0.2876821, + "_score": 0.3616575, "_source": { "comment": "This car could have better brakes", <3> "username": "baddriver007" diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index 20aa072066b5f..3097ece21db23 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -273,7 +273,6 @@ Tokenizer:: * <<analysis-standard-tokenizer,Standard Tokenizer>> Token Filters:: -* <<analysis-standard-tokenfilter,Standard Token Filter>> * <<analysis-lowercase-tokenfilter,Lower Case Token Filter>> * <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default) @@ -292,7 +291,6 @@ PUT /standard_example "rebuilt_standard": { "tokenizer": "standard", "filter": [ - "standard", "lowercase" <1> ] } diff --git a/docs/reference/analysis/tokenfilters.asciidoc b/docs/reference/analysis/tokenfilters.asciidoc index ee891fdd09aa7..d07ab756bfa1c 100644 --- a/docs/reference/analysis/tokenfilters.asciidoc +++ b/docs/reference/analysis/tokenfilters.asciidoc @@ -9,8 +9,6 @@ or add tokens (eg synonyms). Elasticsearch has a number of built in token filters which can be used to build <<analysis-custom-analyzer,custom analyzers>>.
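Throughout these documentation changes the `standard` entry is simply dropped from each `"filter"` list: the `standard` token filter has long been a no-op and is removed in Lucene 8, so the resulting analyzers produce identical tokens without it. The sketch below (illustrative only, not part of this patch; it assumes lucene-core on the classpath, and the class name is made up) shows the rebuilt chain, a `StandardTokenizer` feeding `LowerCaseFilter` directly with no intermediate filter:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class RebuiltStandardChain {
    public static void main(String[] args) throws IOException {
        // Tokenize, then lowercase: there is no StandardFilter step anymore.
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader("The QUICK brown fox"));
        try (TokenStream stream = new LowerCaseFilter(tokenizer)) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term); // prints: the, quick, brown, fox
            }
            stream.end();
        }
    }
}
--------------------------------------------------

This is the same chain that the `rebuilt_standard` example above defines in JSON.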
-include::tokenfilters/standard-tokenfilter.asciidoc[] - include::tokenfilters/asciifolding-tokenfilter.asciidoc[] include::tokenfilters/flatten-graph-tokenfilter.asciidoc[] diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc index 73d35549da8b6..bd22b013334a9 100644 --- a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc @@ -15,7 +15,7 @@ PUT /asciifold_example "analyzer" : { "default" : { "tokenizer" : "standard", - "filter" : ["standard", "asciifolding"] + "filter" : ["asciifolding"] } } } @@ -37,7 +37,7 @@ PUT /asciifold_example "analyzer" : { "default" : { "tokenizer" : "standard", - "filter" : ["standard", "my_ascii_folding"] + "filter" : ["my_ascii_folding"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index 956c5ad13d034..924903b9f65a8 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -16,7 +16,7 @@ PUT /elision_example "analyzer" : { "default" : { "tokenizer" : "standard", - "filter" : ["standard", "elision"] + "filter" : ["elision"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc index 05687f8669155..33a927c4b98bf 100644 --- a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc @@ -26,7 +26,7 @@ PUT /keep_types_example "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "extract_numbers"] + "filter" : ["lowercase", "extract_numbers"] } }, "filter" : { @@ -87,7 +87,7 @@ PUT /keep_types_exclude_example "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "remove_numbers"] + "filter" : ["lowercase", "remove_numbers"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc index 50c74942a0101..b7385379be94b 100644 --- a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc @@ -27,11 +27,11 @@ PUT /keep_words_example "analyzer" : { "example_1" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "words_till_three"] + "filter" : ["lowercase", "words_till_three"] }, "example_2" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "words_in_file"] + "filter" : ["lowercase", "words_in_file"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 93e1eed26b4b2..99ed03649ff93 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -19,7 +19,7 @@ PUT /my_index "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "my_snow"] + "filter" : ["lowercase", "my_snow"] } }, "filter" : { diff --git a/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc 
b/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc deleted file mode 100644 index 0270bf71b4b3e..0000000000000 --- a/docs/reference/analysis/tokenfilters/standard-tokenfilter.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -[[analysis-standard-tokenfilter]] -=== Standard Token Filter - -A token filter of type `standard` that normalizes tokens extracted with -the -<>. - -[TIP] -================================================== - -The `standard` token filter currently does nothing. It remains as a placeholder -in case some filtering function needs to be added in a future version. - -================================================== diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index a13c6746d74be..f59e2f3f2cf88 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -13,7 +13,7 @@ PUT /my_index "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "my_stemmer"] + "filter" : ["lowercase", "my_stemmer"] } }, "filter" : { diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index 37901cb3abe62..c09922fe63fc3 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -143,13 +143,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.80259144, + "max_score": 0.8025915, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.80259144, + "_score": 0.8025915, "_source": { "body": "Ski resort" } @@ -200,13 +200,13 @@ GET index/_search }, "hits": { "total": 1, - "max_score": 0.80259144, + "max_score": 0.8025915, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.80259144, + "_score": 0.8025915, "_source": { "body": "Ski resort" } diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index f5d5610ca1a2e..cf5cab106f891 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -295,27 +295,27 @@ Which yields: "details": [] }, { - "value": 2.0, + "value": 2, "description": "field.docCount", "details": [] }, { - "value": 4.0, + "value": 4, "description": "field.sumDocFreq", "details": [] }, { - "value": 5.0, + "value": 5, "description": "field.sumTotalTermFreq", "details": [] }, { - "value": 1.0, + "value": 1, "description": "term.docFreq", "details": [] }, { - "value": 2.0, + "value": 2, "description": "term.totalTermFreq", "details": [] }, @@ -325,7 +325,7 @@ Which yields: "details": [] }, { - "value": 3.0, + "value": 3, "description": "doc.length", "details": [] } @@ -469,27 +469,27 @@ GET /index/_search?explain=true "details": [] }, { - "value": 2.0, + "value": 2, "description": "field.docCount", "details": [] }, { - "value": 4.0, + "value": 4, "description": "field.sumDocFreq", "details": [] }, { - "value": 5.0, + "value": 5, "description": "field.sumTotalTermFreq", "details": [] }, { - "value": 1.0, + "value": 1, "description": "term.docFreq", "details": [] }, { - "value": 2.0, + "value": 2, "description": "term.totalTermFreq", "details": [] }, @@ -499,7 +499,7 @@ GET /index/_search?explain=true "details": [] }, { - "value": 3.0, + "value": 3, "description": "doc.length", "details": [] } diff --git a/docs/reference/mapping/types/percolator.asciidoc 
b/docs/reference/mapping/types/percolator.asciidoc index 066d3ce1ac597..e4502d37360c9 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -446,7 +446,6 @@ PUT my_queries1 "type": "custom", "tokenizer": "standard", "filter": [ - "standard", "lowercase", "wildcard_edge_ngram" ] @@ -597,7 +596,6 @@ PUT my_queries2 "type": "custom", "tokenizer": "standard", "filter": [ - "standard", "lowercase", "reverse", "wildcard_edge_ngram" @@ -607,7 +605,6 @@ PUT my_queries2 "type": "custom", "tokenizer": "standard", "filter": [ - "standard", "lowercase", "reverse" ] diff --git a/docs/reference/migration/migrate_7_0/analysis.asciidoc b/docs/reference/migration/migrate_7_0/analysis.asciidoc index db617d3301fd7..6e6cc5b078d61 100644 --- a/docs/reference/migration/migrate_7_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_7_0/analysis.asciidoc @@ -22,3 +22,7 @@ The `delimited_payload_filter` was deprecated and renamed to `delimited_payload` Using it in indices created before 7.0 will issue deprecation warnings. Using the old name in new indices created in 7.0 will throw an error. Use the new name `delimited_payload` instead. + +==== `standard` filter has been removed + +The `standard` token filter has been removed because it doesn't change anything in the stream. diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 1a932fdd41400..f07d1d09747e7 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -555,3 +555,8 @@ See <>. See <>. +[role="exclude",id="analysis-standard-tokenfilter"] +=== Standard filter removed + +The standard token filter has been removed. + diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index fd09984f1696f..341c8e4802b09 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -30,62 +30,67 @@ This will yield the following result: [source,js] -------------------------------------------------- { - "_index": "twitter", - "_type": "_doc", - "_id": "0", - "matched": true, - "explanation": { - "value": 1.6943599, - "description": "weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:", - "details": [ + "_index":"twitter", + "_type":"_doc", + "_id":"0", + "matched":true, + "explanation":{ + "value":1.6943597, + "description":"weight(message:elasticsearch in 0) [PerFieldSimilarity], result of:", + "details":[ { - "value": 1.6943599, - "description": "score(doc=0,freq=1.0 = termFreq=1.0\n), product of:", - "details": [ + "value":1.6943597, + "description":"score(freq=1.0), product of:", + "details":[ { - "value": 1.3862944, - "description": "idf, computed as log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5)) from:", - "details": [ + "value":2.2, + "description":"scaling factor, k1 + 1", + "details":[] + }, + { + "value":1.3862944, + "description":"idf, computed as log(1 + (N - n + 0.5) / (n + 0.5)) from:", + "details":[ { - "value": 1.0, - "description": "docFreq", - "details": [] + "value":1, + "description":"n, number of documents containing term", + "details":[] }, { - "value": 5.0, - "description": "docCount", - "details": [] - } - ] + "value":5, + "description":"N, total number of documents with field", + "details":[] + } + ] }, - { - "value": 1.2222223, - "description": "tfNorm, computed as (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength)) from:", - "details": [ + { + "value":0.5555555, + "description":"tf, computed 
as freq / (freq + k1 * (1 - b + b * dl / avgdl)) from:", + "details":[ { - "value": 1.0, - "description": "termFreq=1.0", - "details": [] + "value":1.0, + "description":"freq, occurrences of term within document", + "details":[] }, { - "value": 1.2, - "description": "parameter k1", - "details": [] + "value":1.2, + "description":"k1, term saturation parameter", + "details":[] }, { - "value": 0.75, - "description": "parameter b", - "details": [] + "value":0.75, + "description":"b, length normalization parameter", + "details":[] }, { - "value": 5.4, - "description": "avgFieldLength", - "details": [] + "value":3.0, + "description":"dl, length of field", + "details":[] }, { - "value": 3.0, - "description": "fieldLength", - "details": [] + "value":5.4, + "description":"avgdl, average length of field", + "details":[] } ] } diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index e2df59ad3f4a3..bc7edcd3a88fa 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -72,7 +72,11 @@ This will yield the following result: "next_doc": 53876, "next_doc_count": 5, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 }, "children": [ { @@ -91,7 +95,11 @@ This will yield the following result: "next_doc": 10111, "next_doc_count": 5, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 } }, { @@ -110,7 +118,11 @@ This will yield the following result: "next_doc": 2852, "next_doc_count": 5, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 } } ] @@ -288,7 +300,11 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen "next_doc": 53876, "next_doc_count": 5, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 } -------------------------------------------------- // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/] @@ -548,7 +564,11 @@ And the response: "score_count": 1, "build_scorer": 377872, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 } }, { @@ -567,7 +587,11 @@ And the response: "score_count": 1, "build_scorer": 112551, "advance": 0, - "advance_count": 0 + "advance_count": 0, + "compute_max_score": 0, + "compute_max_score_count": 0, + "shallow_advance": 0, + "shallow_advance_count": 0 } } ], diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 887ae2bdf149e..8e719a02c759b 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -265,19 +265,19 @@ Response not included in text but tested for completeness sake. 
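The `compute_max_score` and `shallow_advance` counters added to the profile breakdown above come from Lucene 8's block-max machinery: every `Scorer` must now report an upper bound on its scores through `getMaxScore(int upTo)` so that blocks of non-competitive documents can be skipped, which is also why the test scorers later in this diff gain exactly that override. Below is a minimal sketch of the new contract for a constant-scoring scorer (a hypothetical class, assuming lucene-core 8.x; not taken from the patch):

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

final class ConstantScorer extends Scorer {
    private final DocIdSetIterator disi;
    private final float boost;

    ConstantScorer(Weight weight, DocIdSetIterator disi, float boost) {
        super(weight);
        this.disi = disi;
        this.boost = boost;
    }

    @Override
    public int docID() {
        return disi.docID();
    }

    @Override
    public DocIdSetIterator iterator() {
        return disi;
    }

    @Override
    public float score() throws IOException {
        return boost; // every match gets the same score
    }

    @Override
    public float getMaxScore(int upTo) throws IOException {
        return boost; // the score never exceeds the constant
    }
}
--------------------------------------------------

Returning `Float.MAX_VALUE` instead, as several scorers in this diff do, is always a safe (if uninformative) bound.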
..., "hits": { "total": 1, - "max_score": 1.0444683, + "max_score": 1.0444684, "hits": [ { "_index": "test", "_type": "_doc", "_id": "1", - "_score": 1.0444683, + "_score": 1.0444684, "_source": ..., "inner_hits": { "comments": { <1> "hits": { "total": 1, - "max_score": 1.0444683, + "max_score": 1.0444684, "hits": [ { "_index": "test", @@ -287,7 +287,7 @@ Response not included in text but tested for completeness sake. "field": "comments", "offset": 1 }, - "_score": 1.0444683, + "_score": 1.0444684, "fields": { "comments.text.keyword": [ "words words words" diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index cba299e97cb8d..96d60467d1072 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -33,12 +33,12 @@ PUT test "trigram": { "type": "custom", "tokenizer": "standard", - "filter": ["standard", "shingle"] + "filter": ["shingle"] }, "reverse": { "type": "custom", "tokenizer": "standard", - "filter": ["standard", "reverse"] + "filter": ["reverse"] } }, "filter": { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java index aa19f62fedc4f..714e7759c54fb 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.matrix.stats; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; @@ -61,8 +62,8 @@ final class MatrixStatsAggregator extends MetricsAggregator { } @Override - public boolean needsScores() { - return (valuesSources == null) ? false : valuesSources.needsScores(); + public ScoreMode scoreMode() { + return (valuesSources != null && valuesSources.needsScores()) ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java index 01b529188c6f0..2259560bcbc7d 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ChineseAnalyzerProvider.java @@ -19,6 +19,7 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -35,7 +36,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider getPreBuiltAnalyzerProviderFactorie () -> new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("snowball", CachingStrategy.LUCENE, - () -> new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET))); + () -> new SnowballAnalyzer("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); // Language analyzers: analyzers.add(new PreBuiltAnalyzerProviderFactory("arabic", CachingStrategy.LUCENE, ArabicAnalyzer::new)); @@ -304,7 +303,8 @@ public List getPreBuiltAnalyzerProviderFactorie analyzers.add(new PreBuiltAnalyzerProviderFactory("bulgarian", CachingStrategy.LUCENE, BulgarianAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("catalan", CachingStrategy.LUCENE, CatalanAnalyzer::new)); // chinese analyzer: only for old indices, best effort - analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, StandardAnalyzer::new)); + analyzers.add(new PreBuiltAnalyzerProviderFactory("chinese", CachingStrategy.ONE, + () -> new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); analyzers.add(new PreBuiltAnalyzerProviderFactory("cjk", CachingStrategy.LUCENE, CJKAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("czech", CachingStrategy.LUCENE, CzechAnalyzer::new)); analyzers.add(new PreBuiltAnalyzerProviderFactory("danish", CachingStrategy.LUCENE, DanishAnalyzer::new)); @@ -376,14 +376,14 @@ public List getPreConfiguredTokenFilters() { DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER))); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> - new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); + new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation", "The [edgeNGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [edge_ngram] instead."); } - return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenFilter(reader, 1); })); filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES))); @@ -400,14 +400,14 @@ public List getPreConfiguredTokenFilters() { new LimitTokenCountFilter(input, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); - filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new)); + filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation", "The [nGram] token filter name is deprecated and will be removed in a future version. " + "Please change the filter name to [ngram] instead."); } - return new NGramTokenFilter(reader); + return new NGramTokenFilter(reader, 1, 2, false); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); @@ -430,7 +430,8 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("sorani_normalization", true, SoraniNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new)); // The stop filter is in lucene-core but the English stop words set is in lucene-analyzers-common - filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET))); + filters.add(PreConfiguredTokenFilter.singleton("stop", false, + input -> new StopFilter(input, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET))); filters.add(PreConfiguredTokenFilter.singleton("trim", true, TrimFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10))); filters.add(PreConfiguredTokenFilter.singleton("type_as_payload", false, TypeAsPayloadTokenFilter::new)); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java index af6d30a035476..6bcd2b737feeb 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenFilterFactory.java @@ -21,7 +21,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; -import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -41,8 +40,8 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory { EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); - this.minGram = settings.getAsInt("min_gram", 
NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); - this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + this.minGram = settings.getAsInt("min_gram", 1); + this.maxGram = settings.getAsInt("max_gram", 2); this.side = parseSide(settings.get("side", "front")); } @@ -63,7 +62,8 @@ public TokenStream create(TokenStream tokenStream) { result = new ReverseStringFilter(result); } - result = new EdgeNGramTokenFilter(result, minGram, maxGram); + // TODO: Expose preserveOriginal + result = new EdgeNGramTokenFilter(result, minGram, maxGram, false); // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect if (side == SIDE_BACK) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java index 22b060613163c..8d99ec1d1a15d 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java @@ -39,8 +39,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff(); - this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); - this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); + this.minGram = settings.getAsInt("min_gram", 1); + this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { @@ -57,6 +57,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { @Override public TokenStream create(TokenStream tokenStream) { - return new NGramTokenFilter(tokenStream, minGram, maxGram); + // TODO: Expose preserveOriginal + return new NGramTokenFilter(tokenStream, minGram, maxGram, false); } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java index bc4b9a763fd68..74e6bbcc65c2a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzer.java @@ -27,11 +27,10 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.en.EnglishPossessiveFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; -import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; -/** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link +/** Filters {@link StandardTokenizer} with {@link * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}. * * Available stemmers are listed in org.tartarus.snowball.ext. 
The name of a @@ -57,8 +56,7 @@ public final class SnowballAnalyzer extends Analyzer { stopSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stopWords)); } - /** Constructs a {@link StandardTokenizer} filtered by a {@link - StandardFilter}, a {@link LowerCaseFilter}, a {@link StopFilter}, + /** Constructs a {@link StandardTokenizer} filtered by a {@link LowerCaseFilter}, a {@link StopFilter}, and a {@link SnowballFilter} */ @Override public TokenStreamComponents createComponents(String fieldName) { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java index 0f213df9ad722..6eec01570a881 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/SnowballAnalyzerProvider.java @@ -19,8 +19,8 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.CharArraySet; -import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.de.GermanAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.fr.FrenchAnalyzer; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.elasticsearch.common.settings.Settings; @@ -42,7 +42,7 @@ * Configuration of language is done with the "language" attribute or the analyzer. * Also supports additional stopwords via "stopwords" attribute *
<p>
- * The SnowballAnalyzer comes with a StandardFilter, LowerCaseFilter, StopFilter + * The SnowballAnalyzer comes with a LowerCaseFilter, StopFilter * and the SnowballFilter. * * @@ -52,7 +52,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider defaultLanguageStopwords = new HashMap<>(); - defaultLanguageStopwords.put("English", StopAnalyzer.ENGLISH_STOP_WORDS_SET); + defaultLanguageStopwords.put("English", EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); defaultLanguageStopwords.put("Dutch", DutchAnalyzer.getDefaultStopSet()); defaultLanguageStopwords.put("German", GermanAnalyzer.getDefaultStopSet()); defaultLanguageStopwords.put("German2", GermanAnalyzer.getDefaultStopSet()); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java index f0b2b7188e5ba..e2ee540fe3e70 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StandardHtmlStripAnalyzer.java @@ -25,8 +25,7 @@ import org.apache.lucene.analysis.StopwordAnalyzerBase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.StopAnalyzer; -import org.apache.lucene.analysis.standard.StandardFilter; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { @@ -36,7 +35,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { */ @Deprecated public StandardHtmlStripAnalyzer() { - super(StopAnalyzer.ENGLISH_STOP_WORDS_SET); + super(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); } StandardHtmlStripAnalyzer(CharArraySet stopwords) { @@ -46,8 +45,7 @@ public StandardHtmlStripAnalyzer() { @Override protected TokenStreamComponents createComponents(final String fieldName) { final Tokenizer src = new StandardTokenizer(); - TokenStream tok = new StandardFilter(src); - tok = new LowerCaseFilter(tok); + TokenStream tok = new LowerCaseFilter(src); if (!stopwords.isEmpty()) { tok = new StopFilter(tok, stopwords); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java index d2d226d6250e8..29122d7292168 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java @@ -20,7 +20,7 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.util.BytesRef; import org.elasticsearch.test.ESTokenStreamTestCase; @@ -44,7 +44,7 @@ public void testNonWordPattern() throws IOException { // split on non-letter pattern, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\W+"), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] { "quick", "brown", "fox", "abcd1234", "56", "78", "dc" }); } @@ -61,7 +61,7 @@ public void testWhitespacePattern() throws IOException 
{ // Split on whitespace patterns, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\s+"), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] { "quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." }); } @@ -78,7 +78,7 @@ public void testCustomPattern() throws IOException { // split on comma, lowercase, english stopwords PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true, - StopAnalyzer.ENGLISH_STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(b, "Here,Are,some,Comma,separated,words,", new String[] { "here", "some", "comma", "separated", "words" }); } @@ -109,7 +109,7 @@ public void testHugeDocument() throws IOException { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java index 0b9998eda31c5..360d17ef0f4f3 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SnowballAnalyzerTests.java @@ -20,7 +20,7 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.test.ESTokenStreamTestCase; public class SnowballAnalyzerTests extends ESTokenStreamTestCase { @@ -33,7 +33,7 @@ public void testEnglish() throws Exception { public void testStopwords() throws Exception { Analyzer a = new SnowballAnalyzer("English", - StandardAnalyzer.STOP_WORDS_SET); + EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); assertAnalyzesTo(a, "the quick brown fox jumped", new String[]{"quick", "brown", "fox", "jump"}); } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 0ebdddcc5f1b5..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fded6bb485b8b01bb2a9280162fd14d4d3ce4510 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..bec50d36793d8 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +5f469e925dde5dff81b9d56f465a8babb56cd26b \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java index 094a62d188baf..2c60136209ca7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExecuteAction.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.RAMDirectory; @@ -550,7 +551,7 @@ static Response innerShardOperation(Request request, ScriptService scriptService Query luceneQuery = request.contextSetup.query.rewrite(context).toQuery(context); IndexSearcher indexSearcher = new IndexSearcher(leafReaderContext.reader()); luceneQuery = indexSearcher.rewrite(luceneQuery); - Weight weight = indexSearcher.createWeight(luceneQuery, true, 1f); + Weight weight = indexSearcher.createWeight(luceneQuery, ScoreMode.COMPLETE, 1f); Scorer scorer = weight.scorer(indexSearcher.getIndexReader().leaves().get(0)); // Consume the first (and only) match. int docID = scorer.iterator().nextDoc(); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java index 567f462046146..76bb6d14dcf61 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScoreTests.java @@ -49,6 +49,11 @@ public void testScoreWorks() { public float score() throws IOException { return 2.5f; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return 2.5f; + } }, true)); } @@ -60,6 +65,11 @@ public void testScoreNotUsed() { public float score() throws IOException { throw new AssertionError("score() should not be called"); } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } }, true)); } @@ -75,6 +85,11 @@ public float score() throws IOException { } throw new AssertionError("score() should not be called twice"); } + + @Override + public float getMaxScore(int upTo) throws IOException { + return 4.5f; + } }, true)); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 6ee021c695f99..4820bc10cf24f 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptedMetricAggContexts; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -74,6 +75,11 @@ public void testMapBasic() { @Override public DocIdSetIterator iterator() { return null; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return 0.5f; + } }; ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, null); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index 0795ab7777526..1b4c4eb0ff636 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -89,7 +89,7 @@ public void testBasics() throws IOException { .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) .build(), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); w.close(); dir.close(); @@ -128,7 +128,7 @@ public void testWeightScript() throws IOException { .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) .build(), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); w.close(); dir.close(); diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml index a9aa00aa5e036..9a43e1f9aa445 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml @@ -161,7 +161,7 @@ "script_score": { "script": { "lang": "painless", - "source": "-doc['num1'].value" + "source": "3 - doc['num1'].value" } } }] diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java index b555afce67ae7..4469c9633dd87 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -78,8 +79,8 @@ public ParentToChildrenAggregator(String name, AggregatorFactories factories, throws IOException { super(name, factories, context, parent, pipelineAggregators, metaData); // these two filters are cached in the parser - this.childFilter = context.searcher().createNormalizedWeight(childFilter, false); - this.parentFilter = context.searcher().createNormalizedWeight(parentFilter, false); + this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false); this.parentOrdToBuckets.fill(0, maxOrd, -1); this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays()); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index 5e57a2774055d..9e9b55872cfca 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -23,16 +23,21 @@ import 
org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.InnerHitContextBuilder; @@ -92,14 +97,14 @@ static final class JoinFieldInnerHitSubContext extends InnerHitsContext.InnerHit } @Override - public TopDocs[] topDocs(SearchHit[] hits) throws IOException { + public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { Weight innerHitQueryWeight = createInnerHitQueryWeight(); - TopDocs[] result = new TopDocs[hits.length]; + TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length]; for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; String joinName = getSortedDocValue(joinFieldMapper.name(), context, hit.docId()); if (joinName == null) { - result[i] = Lucene.EMPTY_TOP_DOCS; + result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN); continue; } @@ -107,7 +112,7 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { ParentIdFieldMapper parentIdFieldMapper = joinFieldMapper.getParentIdFieldMapper(typeName, fetchChildInnerHits == false); if (parentIdFieldMapper == null) { - result[i] = Lucene.EMPTY_TOP_DOCS; + result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN); continue; } @@ -125,29 +130,41 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { q = context.mapperService().fullName(IdFieldMapper.NAME).termQuery(parentId, qsc); } - Weight weight = context.searcher().createNormalizedWeight(q, false); + Weight weight = context.searcher().createWeight(context.searcher().rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1f); if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); } - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); + result[i] = new TopDocsAndMaxScore( + new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO), + Lucene.EMPTY_SCORE_DOCS), Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; + MaxScoreCollector maxScoreCollector = null; if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true); + topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE); + if (trackScores()) { + maxScoreCollector = new MaxScoreCollector(); + } } else { - topDocsCollector = TopScoreDocCollector.create(topN); + topDocsCollector = 
TopScoreDocCollector.create(topN, Integer.MAX_VALUE); + maxScoreCollector = new MaxScoreCollector(); } try { for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { - intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } } finally { clearReleasables(Lifetime.COLLECTION); } - result[i] = topDocsCollector.topDocs(from(), size()); + TopDocs topDocs = topDocsCollector.topDocs(from(), size()); + float maxScore = Float.NaN; + if (maxScoreCollector != null) { + maxScore = maxScoreCollector.getMaxScore(); + } + result[i] = new TopDocsAndMaxScore(topDocs, maxScore); } } return result; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java index 5bbf998883eee..bf491727ff576 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQuery.java @@ -26,11 +26,14 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; import org.elasticsearch.common.CheckedFunction; @@ -53,14 +56,17 @@ final class PercolateQuery extends Query implements Accountable { private final Query candidateMatchesQuery; private final Query verifiedMatchesQuery; private final IndexSearcher percolatorIndexSearcher; + private final Query nonNestedDocsFilter; PercolateQuery(String name, QueryStore queryStore, List documents, - Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, Query verifiedMatchesQuery) { + Query candidateMatchesQuery, IndexSearcher percolatorIndexSearcher, + Query nonNestedDocsFilter, Query verifiedMatchesQuery) { this.name = name; this.documents = Objects.requireNonNull(documents); this.candidateMatchesQuery = Objects.requireNonNull(candidateMatchesQuery); this.queryStore = Objects.requireNonNull(queryStore); this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher); + this.nonNestedDocsFilter = nonNestedDocsFilter; this.verifiedMatchesQuery = Objects.requireNonNull(verifiedMatchesQuery); } @@ -68,16 +74,17 @@ final class PercolateQuery extends Query implements Accountable { public Query rewrite(IndexReader reader) throws IOException { Query rewritten = candidateMatchesQuery.rewrite(reader); if (rewritten != candidateMatchesQuery) { - return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, verifiedMatchesQuery); + return new PercolateQuery(name, queryStore, documents, rewritten, percolatorIndexSearcher, + nonNestedDocsFilter, verifiedMatchesQuery); } else { return this; } } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, false, boost); - final Weight candidateMatchesWeight = 
candidateMatchesQuery.createWeight(searcher, false, boost); + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + final Weight verifiedMatchesWeight = verifiedMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); + final Weight candidateMatchesWeight = candidateMatchesQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); return new Weight(this) { @Override public void extractTerms(Set<Term> set) { @@ -91,7 +98,7 @@ public Explanation explain(LeafReaderContext leafReaderContext, int docId) throw int result = twoPhaseIterator.approximation().advance(docId); if (result == docId) { if (twoPhaseIterator.matches()) { - if (needsScores) { + if (scoreMode.needsScores()) { CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext); Query query = percolatorQueries.apply(docId); Explanation detail = percolatorIndexSearcher.explain(query, 0); @@ -112,9 +119,9 @@ public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException { return null; } - final CheckedFunction<Integer, Query, IOException> queries = queryStore.getQueries(leafReaderContext); - if (needsScores) { - return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) { + final CheckedFunction<Integer, Query, IOException> percolatorQueries = queryStore.getQueries(leafReaderContext); + if (scoreMode.needsScores()) { + return new BaseScorer(this, approximation) { float score; @@ -122,8 +129,14 @@ public Scorer scorer(LeafReaderContext leafReaderContext) throws IOException { boolean matchDocId(int docId) throws IOException { Query query = percolatorQueries.apply(docId); if (query != null) { + if (nonNestedDocsFilter != null) { + query = new BooleanQuery.Builder() + .add(query, Occur.MUST) + .add(nonNestedDocsFilter, Occur.FILTER) + .build(); + } TopDocs topDocs = percolatorIndexSearcher.search(query, 1); - if (topDocs.totalHits > 0) { + if (topDocs.scoreDocs.length > 0) { score = topDocs.scoreDocs[0].score; return true; } else { @@ -142,7 +155,7 @@ public float score() throws IOException { } else { ScorerSupplier verifiedDocsScorer = verifiedMatchesWeight.scorerSupplier(leafReaderContext); Bits verifiedDocsBits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), verifiedDocsScorer); - return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) { + return new BaseScorer(this, approximation) { @Override public float score() throws IOException { @@ -159,7 +172,16 @@ boolean matchDocId(int docId) throws IOException { return true; } Query query = percolatorQueries.apply(docId); - return query != null && Lucene.exists(percolatorIndexSearcher, query); + if (query == null) { + return false; + } + if (nonNestedDocsFilter != null) { + query = new BooleanQuery.Builder() + .add(query, Occur.MUST) + .add(nonNestedDocsFilter, Occur.FILTER) + .build(); + } + return Lucene.exists(percolatorIndexSearcher, query); } }; } @@ -182,6 +204,10 @@ IndexSearcher getPercolatorIndexSearcher() { return percolatorIndexSearcher; } + boolean excludesNestedDocs() { + return nonNestedDocsFilter != null; + } + List<BytesReference> getDocuments() { return documents; } @@ -241,15 +267,10 @@ interface QueryStore { abstract static class BaseScorer extends Scorer { final Scorer approximation; - final CheckedFunction<Integer, Query, IOException> percolatorQueries; - final IndexSearcher percolatorIndexSearcher; - BaseScorer(Weight weight, Scorer approximation, CheckedFunction<Integer, Query, IOException> percolatorQueries, - IndexSearcher percolatorIndexSearcher) { + BaseScorer(Weight weight, Scorer approximation) { super(weight); this.approximation = 
approximation; - this.percolatorQueries = percolatorQueries; - this.percolatorIndexSearcher = percolatorIndexSearcher; } @Override @@ -279,6 +300,10 @@ public final int docID() { abstract boolean matchDocId(int docId) throws IOException; + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 445076b8eba07..09cc04458ec70 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -29,10 +29,9 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; @@ -56,7 +55,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -605,13 +603,19 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { } }; final IndexSearcher docSearcher; + final boolean excludeNestedDocuments; if (docs.size() > 1 || docs.get(0).docs().size() > 1) { assert docs.size() != 1 || docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(analyzer, docs); + excludeNestedDocuments = docMapper.hasNestedObjects() && docs.stream() + .map(ParsedDocument::docs) + .mapToInt(List::size) + .anyMatch(size -> size > 1); } else { MemoryIndex memoryIndex = MemoryIndex.fromDocument(docs.get(0).rootDoc(), analyzer, true, false); docSearcher = memoryIndex.createSearcher(); docSearcher.setQueryCache(null); + excludeNestedDocuments = false; } PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType; @@ -621,7 +625,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { percolateShardContext, pft.mapUnmappedFieldsAsText); - return pft.percolateQuery(name, queryStore, documents, docSearcher, context.indexVersionCreated()); + return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated()); } public String getField() { @@ -653,17 +657,7 @@ static IndexSearcher createMultiDocumentSearcher(Analyzer analyzer, Collection
documents, - IndexSearcher searcher, Version indexVersion) throws IOException { + IndexSearcher searcher, boolean excludeNestedDocuments, Version indexVersion) throws IOException { IndexReader indexReader = searcher.getIndexReader(); Tuple t = createCandidateQuery(indexReader, indexVersion); Query candidateQuery = t.v1(); @@ -261,7 +262,11 @@ Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List createCandidateQuery(IndexReader indexReader, Version indexVersion) throws IOException { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 4d5e3d2a988f9..fdcc9156b415e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -74,7 +75,8 @@ static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, Searc // See https://issues.apache.org/jira/browse/LUCENE-8055 // for now we just use version 6.0 version to find nested parent final Version version = Version.V_6_0_0; //context.mapperService().getIndexSettings().getIndexVersionCreated(); - Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(version), false); + Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(version)), + ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0)); int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc(); BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc); @@ -96,7 +98,7 @@ static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, Searc } TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC)); - if (topDocs.totalHits == 0) { + if (topDocs.totalHits.value == 0) { // This hit didn't match with a percolate query, // likely to happen when percolating multiple documents continue; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 9c8979601e8dc..3d9a8fb8ebb08 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -61,6 +61,7 @@ import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -595,51 +596,52 @@ public void testRangeQueries() throws Exception { Version v = Version.V_6_1_0; MemoryIndex memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new IntPoint("int_field", 3)), new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - Query 
query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + Query query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + percolateSearcher, false, v); TopDocs topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new LongPoint("long_field", 7L)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(1, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new HalfFloatPoint("half_float_field", 12)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1L, topDocs.totalHits); + assertEquals(1L, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(2, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new FloatPoint("float_field", 17)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(3, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new DoublePoint("double_field", 21)), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(4, topDocs.scoreDocs[0].doc); memoryIndex = MemoryIndex.fromDocument(Collections.singleton(new InetAddressPoint("ip_field", forString("192.168.0.4"))), new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + query = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, v); 
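Note on the assertion changes above: in Lucene 8, TopDocs.totalHits is no longer a bare long but a TotalHits object pairing a hit count with a relation (exact count vs. lower bound), hence the switch to topDocs.totalHits.value throughout these tests. A minimal self-contained sketch of the new shape (the class name is just for illustration):

    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    public class TotalHitsExample {
        public static void main(String[] args) {
            // Lucene 8: TopDocs takes a TotalHits (count + relation) instead of a long,
            // and its constructor no longer carries a max score.
            TopDocs topDocs = new TopDocs(
                    new TotalHits(1, TotalHits.Relation.EQUAL_TO),
                    new ScoreDoc[] { new ScoreDoc(0, 1.0f) });
            long hitCount = topDocs.totalHits.value;   // was: topDocs.totalHits
            boolean exact = topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO;
            System.out.println(hitCount + " hit(s), exact: " + exact);
        }
    }

The relation exists because Lucene 8 may stop counting hits early: EQUAL_TO means the count is exact, GREATER_THAN_OR_EQUAL_TO means it is only a lower bound.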
topDocs = shardSearcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertEquals(1, topDocs.scoreDocs.length); assertEquals(5, topDocs.scoreDocs[0].doc); } @@ -777,16 +779,16 @@ public void testPercolateMatchAll() throws Exception { memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(3L, topDocs.totalHits); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(3L, topDocs.totalHits.value); assertEquals(3, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(1, topDocs.scoreDocs[1].doc); assertEquals(4, topDocs.scoreDocs[2].doc); topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10); - assertEquals(3L, topDocs.totalHits); + assertEquals(3L, topDocs.totalHits.value); assertEquals(3, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(1, topDocs.scoreDocs[1].doc); @@ -810,9 +812,9 @@ public void testFunctionScoreQuery() throws Exception { memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(2L, topDocs.totalHits); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); @@ -860,17 +862,18 @@ public void testPercolateSmallAndLargeDocument() throws Exception { try (IndexReader ir = DirectoryReader.open(directory)){ IndexSearcher percolateSearcher = new IndexSearcher(ir); PercolateQuery query = (PercolateQuery) - fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + percolateSearcher, false, v); BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery(); assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(CoveringQuery.class)); TopDocs topDocs = shardSearcher.search(query, 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); @@ -890,18 +893,19 @@ public void testPercolateSmallAndLargeDocument() throws 
Exception { try (IndexReader ir = DirectoryReader.open(directory)){ IndexSearcher percolateSearcher = new IndexSearcher(ir); PercolateQuery query = (PercolateQuery) - fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, v); + fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), + percolateSearcher, false, v); BooleanQuery candidateQuery = (BooleanQuery) query.getCandidateMatchesQuery(); assertThat(candidateQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class)); TopDocs topDocs = shardSearcher.search(query, 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(1, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); topDocs = shardSearcher.search(new ConstantScoreQuery(query), 10); - assertEquals(2L, topDocs.totalHits); + assertEquals(2L, topDocs.totalHits.value); assertEquals(2, topDocs.scoreDocs.length); assertEquals(1, topDocs.scoreDocs[0].doc); assertEquals(2, topDocs.scoreDocs[1].doc); @@ -951,9 +955,9 @@ public void testDuplicatedClauses() throws Exception { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1 value2 value3", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(2L, topDocs.totalHits); + PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(2L, topDocs.totalHits.value); assertEquals(0, topDocs.scoreDocs[0].doc); assertEquals(1, topDocs.scoreDocs[1].doc); } @@ -985,25 +989,25 @@ public void testDuplicatedClauses2() throws Exception { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(1L, topDocs.totalHits); + PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(1L, topDocs.totalHits.value); assertEquals(0, topDocs.scoreDocs[0].doc); memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v); - topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(1L, topDocs.totalHits); + query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v); + topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(1L, topDocs.totalHits.value); assertEquals(0, topDocs.scoreDocs[0].doc); memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value3", new 
WhitespaceAnalyzer()); percolateSearcher = memoryIndex.createSearcher(); - query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v); - topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(1L, topDocs.totalHits); + query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v); + topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(1L, topDocs.totalHits.value); assertEquals(0, topDocs.scoreDocs[0].doc); } @@ -1036,9 +1040,9 @@ public void testMsmAndRanges_disjunction() throws Exception { document.add(new IntPoint("int_field", 7)); MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); - PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v); - TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); - assertEquals(1L, topDocs.totalHits); + PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, false, v); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); + assertEquals(1L, topDocs.totalHits.value); assertEquals(0, topDocs.scoreDocs[0].doc); } @@ -1046,7 +1050,7 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd boolean requireScore = randomBoolean(); IndexSearcher percolateSearcher = memoryIndex.createSearcher(); Query percolateQuery = fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); Query query = requireScore ? 
percolateQuery : new ConstantScoreQuery(percolateQuery); TopDocs topDocs = shardSearcher.search(query, 100); @@ -1055,7 +1059,7 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd TopDocs controlTopDocs = shardSearcher.search(controlQuery, 100); try { - assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits)); + assertThat(topDocs.totalHits.value, equalTo(controlTopDocs.totalHits.value)); assertThat(topDocs.scoreDocs.length, equalTo(controlTopDocs.scoreDocs.length)); for (int j = 0; j < topDocs.scoreDocs.length; j++) { assertThat(topDocs.scoreDocs[j].doc, equalTo(controlTopDocs.scoreDocs[j].doc)); @@ -1130,7 +1134,7 @@ private TopDocs executeQuery(PercolateQuery.QueryStore queryStore, IndexSearcher shardSearcher) throws IOException { IndexSearcher percolateSearcher = memoryIndex.createSearcher(); Query percolateQuery = fieldType.percolateQuery("_name", queryStore, - Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); + Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, Version.CURRENT); return shardSearcher.search(percolateQuery, 10); } @@ -1174,7 +1178,7 @@ private ControlQuery(MemoryIndex memoryIndex, PercolateQuery.QueryStore querySto } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) { final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher(); return new Weight(this) { @@ -1210,8 +1214,8 @@ protected boolean match(int doc) { try { Query query = leaf.apply(doc); TopDocs topDocs = percolatorIndexSearcher.search(query, 1); - if (topDocs.totalHits > 0) { - if (needsScores) { + if (topDocs.scoreDocs.length > 0) { + if (scoreMode.needsScores()) { _score[0] = topDocs.scoreDocs[0].score; } return true; @@ -1239,6 +1243,11 @@ public DocIdSetIterator iterator() { public float score() throws IOException { return _score[0]; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return _score[0]; + } }; } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index eb7af5f30d061..be9c3f83f3f4b 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -19,12 +19,6 @@ package org.elasticsearch.percolator; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -40,8 +34,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; @@ -63,7 +55,6 @@ import java.util.Set; import 
static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.sameInstance; public class PercolateQueryBuilderTests extends AbstractQueryTestCase { @@ -72,8 +63,8 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase parseQuery("{\"percolate\" : { \"document\": {}, \"documents\": [{}, {}], \"field\":\"" + queryField + "\"}}")); } - public void testCreateNestedDocumentSearcher() throws Exception { - int numNestedDocs = randomIntBetween(2, 8); - List docs = new ArrayList<>(numNestedDocs); - for (int i = 0; i < numNestedDocs; i++) { - docs.add(new ParseContext.Document()); - } - - Collection parsedDocument = Collections.singleton( - new ParsedDocument(null, null, "_id", "_type", null, docs, null, null, null)); - Analyzer analyzer = new WhitespaceAnalyzer(); - IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, parsedDocument); - assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numNestedDocs)); - - // ensure that any query get modified so that the nested docs are never included as hits: - Query query = new MatchAllDocsQuery(); - BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery(); - assertThat(result.clauses().size(), equalTo(2)); - assertThat(result.clauses().get(0).getQuery(), sameInstance(query)); - assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST)); - assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); - } - - public void testCreateMultiDocumentSearcher() throws Exception { - int numDocs = randomIntBetween(2, 8); - List docs = new ArrayList<>(); - for (int i = 0; i < numDocs; i++) { - docs.add(new ParsedDocument(null, null, "_id", "_type", null, - Collections.singletonList(new ParseContext.Document()), null, null, null)); - } - Analyzer analyzer = new WhitespaceAnalyzer(); - IndexSearcher indexSearcher = PercolateQueryBuilder.createMultiDocumentSearcher(analyzer, docs); - assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(numDocs)); - - // ensure that any query get modified so that the nested docs are never included as hits: - Query query = new MatchAllDocsQuery(); - BooleanQuery result = (BooleanQuery) indexSearcher.createNormalizedWeight(query, true).getQuery(); - assertThat(result.clauses().size(), equalTo(2)); - assertThat(result.clauses().get(0).getQuery(), sameInstance(query)); - assertThat(result.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST)); - assertThat(result.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.MUST_NOT)); - } - private static BytesReference randomSource(Set usedFields) { try { // If we create two source that have the same field, but these fields have different kind of values (str vs. 
lng) then @@ -352,4 +301,5 @@ public void testFieldAlias() throws IOException { assertEquals(query.getCandidateMatchesQuery(), aliasQuery.getCandidateMatchesQuery()); assertEquals(query.getVerifiedMatchesQuery(), aliasQuery.getVerifiedMatchesQuery()); } + } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java index ac9cc97499ce6..4c2c135554587 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryTests.java @@ -117,9 +117,9 @@ public void testPercolateQuery() throws Exception { IndexSearcher percolateSearcher = memoryIndex.createSearcher(); // no scoring, wrapping it in a constant score query: Query query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("a")), - new TermQuery(new Term("select", "a")), percolateSearcher, new MatchNoDocsQuery(""))); + new TermQuery(new Term("select", "a")), percolateSearcher, null, new MatchNoDocsQuery(""))); TopDocs topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); assertThat(topDocs.scoreDocs.length, equalTo(1)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); Explanation explanation = shardSearcher.explain(query, 0); @@ -127,9 +127,9 @@ public void testPercolateQuery() throws Exception { assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[0].score)); query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("b")), - new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery(""))); + new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery(""))); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); explanation = shardSearcher.explain(query, 1); @@ -147,14 +147,14 @@ public void testPercolateQuery() throws Exception { assertThat(explanation.getValue(), equalTo(topDocs.scoreDocs[2].score)); query = new ConstantScoreQuery(new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("c")), - new MatchAllDocsQuery(), percolateSearcher, new MatchAllDocsQuery())); + new MatchAllDocsQuery(), percolateSearcher, null, new MatchAllDocsQuery())); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(4L)); + assertThat(topDocs.totalHits.value, equalTo(4L)); query = new PercolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), - new TermQuery(new Term("select", "b")), percolateSearcher, new MatchNoDocsQuery("")); + new TermQuery(new Term("select", "b")), percolateSearcher, null, new MatchNoDocsQuery("")); topDocs = shardSearcher.search(query, 10); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); explanation = shardSearcher.explain(query, 3); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateWithNestedQueryBuilderTests.java 
b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateWithNestedQueryBuilderTests.java new file mode 100644 index 0000000000000..e58b6c6ad6a70 --- /dev/null +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateWithNestedQueryBuilderTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.percolator; + +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; + +public class PercolateWithNestedQueryBuilderTests extends PercolateQueryBuilderTests { + + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + super.initializeAdditionalMappings(mapperService); + mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef( + "_doc", "some_nested_object", "type=nested"))), MapperService.MergeReason.MAPPING_UPDATE); + } + + public void testDetectsNestedDocuments() throws IOException { + QueryShardContext shardContext = createShardContext(); + + PercolateQueryBuilder builder = new PercolateQueryBuilder(queryField, + new BytesArray("{ \"foo\": \"bar\" }"), XContentType.JSON); + QueryBuilder rewrittenBuilder = rewriteAndFetch(builder, shardContext); + PercolateQuery query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext); + assertFalse(query.excludesNestedDocs()); + + builder = new PercolateQueryBuilder(queryField, + new BytesArray("{ \"foo\": \"bar\", \"some_nested_object\": [ { \"baz\": 42 } ] }"), XContentType.JSON); + rewrittenBuilder = rewriteAndFetch(builder, shardContext); + query = (PercolateQuery) rewrittenBuilder.toQuery(shardContext); + assertTrue(query.excludesNestedDocs()); + } +} diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index f1b89d92ab11e..e5f2160cfcaab 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -46,7 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testHitsExecutionNeeded() { PercolateQuery percolateQuery = new 
PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), - new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery()); PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, emptyMap()); SearchContext searchContext = Mockito.mock(SearchContext.class); @@ -60,7 +60,7 @@ public void testHitsExecutionNeeded() { public void testLocatePercolatorQuery() { PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), - new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery()); assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(new MatchAllDocsQuery()).size(), equalTo(0)); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER); @@ -94,7 +94,7 @@ public void testLocatePercolatorQuery() { assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(disjunctionMaxQuery).get(0), sameInstance(percolateQuery)); PercolateQuery percolateQuery2 = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), - new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); + new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery()); bq = new BooleanQuery.Builder(); bq.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER); assertThat(PercolatorHighlightSubFetchPhase.locatePercolatorQuery(bq.build()).size(), equalTo(0)); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index a428726225b95..89356bf274d8d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.search.SearchHit; @@ -58,7 +59,7 @@ public void testHitsExecute() throws Exception { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), - new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); @@ -72,7 +73,7 @@ public void testHitsExecute() throws Exception { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), - new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + 
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); @@ -85,7 +86,7 @@ public void testHitsExecute() throws Exception { MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), - new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery()); PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); @@ -100,7 +101,7 @@ public void testConvertTopDocsToSlots() { scoreDocs[i] = new ScoreDoc(i, 1f); } - TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs); IntStream stream = PercolatorMatchedSlotSubFetchPhase.convertTopDocsToSlots(topDocs, null); int[] result = stream.toArray(); @@ -117,7 +118,7 @@ public void testConvertTopDocsToSlots_nestedDocs() { scoreDocs[2] = new ScoreDoc(8, 1f); scoreDocs[3] = new ScoreDoc(11, 1f); scoreDocs[4] = new ScoreDoc(14, 1f); - TopDocs topDocs = new TopDocs(scoreDocs.length, scoreDocs, 1f); + TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs); FixedBitSet bitSet = new FixedBitSet(15); bitSet.set(2); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 1e79e1e70ef8f..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a010e852be8d56efe1906e6da5292e4541239724 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..be2e7ec355ac5 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +97a3758487272ba4d15720b0ca15b0f980310c89 \ No newline at end of file diff --git a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml index 67ff1dab98483..89ef510c72b02 100644 --- a/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml +++ b/plugins/analysis-icu/src/test/resources/rest-api-spec/test/analysis_icu/20_search.yml @@ -12,7 +12,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_collator"] + filter: ["lowercase", "my_collator"] filter: my_collator: type: icu_collation diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2d9669e436229..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-88e0ed90d433a9088528485cd4f59311735d92a4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..a7f63df28d7e5 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +12ed739794cd317754684308ddc5bdbdcc46cdde \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index f7b8fdd4bc187..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0daec9ac3c4bba5f91b1bc413c651b7a98313982 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..8fc57bbf7e46d --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +4da6e5c17a17f0a9a99b518ea9985ea06996b63b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 80cf627011b4e..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5af81eec04c1da0d6969cff18f360ff379b1bf7 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..d94b274bf13ff --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +a36b2db18a2a22966ab0bf9fced775f22dd7029d \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml index 1f326fe3776d1..1be0d8525a1c6 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/10_metaphone.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml index 5af9f48aa808e..84b0129414c8e 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/20_double_metaphone.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard 
- filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml index 259b0adea745d..bdd1ddef388df 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/30_beider_morse.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "beider_morse"] + filter: ["lowercase", "beider_morse"] filter: beider_morse: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml index 75c672172391c..34a5bfa1da14c 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/40_search.yml @@ -12,7 +12,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "my_metaphone"] + filter: ["lowercase", "my_metaphone"] filter: my_metaphone: type: phonetic diff --git a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml index c67b6892bc993..bee4c8bf5f432 100644 --- a/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml +++ b/plugins/analysis-phonetic/src/test/resources/rest-api-spec/test/analysis_phonetic/50_daitch_mokotoff.yml @@ -13,7 +13,7 @@ analyzer: my_analyzer: tokenizer: standard - filter: ["standard", "lowercase", "daitch_mokotoff"] + filter: ["lowercase", "daitch_mokotoff"] filter: daitch_mokotoff: type: phonetic diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 14be684b96f3d..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e649088ee298293aa95a05391dff9cb0582648e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..f75d7abd6a36b --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +5f1d360a47d2fd166e970d17c46b284830e64258 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index ea55c790537f4..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47fb370054ba7413d050f13c177edf01180c31ca \ No newline at end of file diff --git 
a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..2e3943cf79345 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +b07883b5e988d1d991503aa49d9b59059518825d \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2d6f580c35a23..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc0708acbac195772b67b5ad2e9c4683d27ff450 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..1d21c6e5b613c --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +1b46b3ee62932de7ba7b670820a13eb973ec5777 \ No newline at end of file diff --git a/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java index d9fc4521a3593..36b5bea411a93 100644 --- a/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java +++ b/plugins/examples/rescore/src/test/java/org/elasticsearch/example/rescore/ExampleRescoreBuilderTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -68,7 +69,7 @@ public void testRescore() throws IOException { String fieldFactor = null; ExampleRescoreBuilder builder = new ExampleRescoreBuilder(factor, fieldFactor).windowSize(2); RescoreContext context = builder.buildContext(null); - TopDocs docs = new TopDocs(10, new ScoreDoc[3], 0); + TopDocs docs = new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[3]); docs.scoreDocs[0] = new ScoreDoc(0, 1.0f); docs.scoreDocs[1] = new ScoreDoc(1, 1.0f); docs.scoreDocs[2] = new ScoreDoc(2, 1.0f); diff --git a/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 2cbf39687624c..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c547b30525ad80d0ceeaa40c2d3a901c7e76fd46 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..3a02e483d6808 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +fa8e0fbef3e3fcf49ace4a4153580070def770eb \ No newline at end of file diff --git 
a/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 9e2473361f033..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c327295d54d5abd2684e00c3aefe58aa1caace7 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..8279b81d6cfc0 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +3d636541581e338a1be7e3e176aac73d7ae0b323 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index fdedaf3fc5756..0000000000000 --- a/server/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73dd7703a94ec2357581f65ee7c1c4d618ff310f \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..683b585bb2f61 --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +126faacb28d1b8cc1ab81d702973d057892120d1 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 4e555692b0f9a..0000000000000 --- a/server/licenses/lucene-grouping-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c3802fa30990a1758f2df19d17fe2c95fc45870 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..483f470b5e015 --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +abd514ec02837f48b8c478287fde7cc5d6439ada \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 73b6c15f332f9..0000000000000 --- a/server/licenses/lucene-highlighter-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d7abdbb7900d7e6a76c391d8be07217c0d882ca \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..27dd042c06bf3 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +778e87a263184b8ddcbb4ef9d244467933f32993 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 23414b8e8e134..0000000000000 --- a/server/licenses/lucene-join-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -011f78ae9d9a386fcf20ceea29ba30e75fb512e8 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 
b/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..13d2db8d210dc --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +96aff29ad966204c73f8dd98d8116f09e34b6ebd \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index d227ebaf46368..0000000000000 --- a/server/licenses/lucene-memory-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c3dd461a7cebdcacc77304660218513e10f89adb \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..6e014f20c97fd --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +e72e2accebb1277c57dfe21bc011195eed91dbfd \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index a892f3a2272ba..0000000000000 --- a/server/licenses/lucene-misc-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d63101181708d78eccc441b0d1193dd91d1a0bf1 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..57081e7aa10ba --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +bf25587ebf6823781f5d7acffd7d65c46c21cb27 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 5d0fead48cbc9..0000000000000 --- a/server/licenses/lucene-queries-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22e56fbd44d6a47d7dddbdda3c17ce22ad0a6680 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..6855364592ea5 --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +6cad42923bcb6e1c6060ae1cbab574646e8c808e \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 8be3d6447b0bb..0000000000000 --- a/server/licenses/lucene-queryparser-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36b38a1d71045f5bee5dc40526f8d57084dbdc00 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..f9d037120a342 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +e5841d7e877e51bbd2d325709353f5ab7e94b49a \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 6d968f5400c52..0000000000000 --- 
a/server/licenses/lucene-sandbox-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21eb8b111bcb94f4abb8c6402dfd10f51ecc0b38 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..45c8934a8d41b --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +fefe17f6ac0c7d505c5051e96d0f4916fec2bf9e \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index b6aec2eae1dda..0000000000000 --- a/server/licenses/lucene-spatial-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d60081c5641ed21aea82d5d0976b40e1f184c8e5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..b02408a7683b3 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +22b0a9d9fb675f7c82a7a2b18f593f3278b40f11 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 6999baccc89e9..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2d42b373546aa8923d25e4e9a673dd186064f9bd \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..d4e8b662ce465 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +bd6449cc67a36891f6b3201489c5ed44d795fab0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index b866b1985568b..0000000000000 --- a/server/licenses/lucene-spatial3d-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f31607959e5a2ed84ab2d9a007a3f76e9a2d38c \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..9743868e5c748 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +5e2a8b3e9e19ad61fcbd27a138cf55f2d6cbfb2d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index 55e1c5990de63..0000000000000 --- a/server/licenses/lucene-suggest-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f7619348f0619867c52f4801531c70358f49873a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..8b722955278cf --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ 
+bd5931d1d5ca3f84565534182881565a44aeb72a \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index 3cc16ce9320d5..63db15b2ee168 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -58,7 +59,7 @@ public BinaryDocValuesRangeQuery(String fieldName, QueryType queryType, LengthTy } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index cd5da674b8e71..dd3ac992475b9 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.index.TermState; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; @@ -84,10 +84,10 @@ public Query rewrite(IndexReader reader) throws IOException { return rewritten; } IndexReaderContext context = reader.getContext(); - TermContext[] ctx = new TermContext[terms.length]; + TermStates[] ctx = new TermStates[terms.length]; int[] docFreqs = new int[ctx.length]; for (int i = 0; i < terms.length; i++) { - ctx[i] = TermContext.build(context, terms[i]); + ctx[i] = TermStates.build(context, terms[i], true); docFreqs[i] = ctx[i].docFreq(); } @@ -96,16 +96,16 @@ public Query rewrite(IndexReader reader) throws IOException { return topLevelQuery(terms, ctx, docFreqs, maxDoc); } - protected abstract Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc); + protected abstract Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc); - protected void blend(final TermContext[] contexts, int maxDoc, IndexReader reader) throws IOException { + protected void blend(final TermStates[] contexts, int maxDoc, IndexReader reader) throws IOException { if (contexts.length <= 1) { return; } int max = 0; long minSumTTF = Long.MAX_VALUE; for (int i = 0; i < contexts.length; i++) { - TermContext ctx = contexts[i]; + TermStates ctx = contexts[i]; int df = ctx.docFreq(); // we use the max here since it's the only "true" estimation we can make here // at least max(df) documents have that term. 
Sum or Averages don't seem @@ -155,7 +155,7 @@ protected int compare(int i, int j) { // the more popular (more frequent) fields // that acts as a tie breaker for (int i : tieBreak) { - TermContext ctx = contexts[i]; + TermStates ctx = contexts[i]; if (ctx.docFreq() == 0) { break; } @@ -183,12 +183,12 @@ protected int compare(int i, int j) { } } - private TermContext adjustTTF(IndexReaderContext readerContext, TermContext termContext, long sumTTF) { + private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termContext, long sumTTF) throws IOException { assert termContext.wasBuiltFor(readerContext); if (sumTTF == -1 && termContext.totalTermFreq() == -1) { return termContext; } - TermContext newTermContext = new TermContext(readerContext); + TermStates newTermContext = new TermStates(readerContext); List leaves = readerContext.leaves(); final int len; if (leaves == null) { @@ -199,7 +199,7 @@ private TermContext adjustTTF(IndexReaderContext readerContext, TermContext term int df = termContext.docFreq(); long ttf = sumTTF; for (int i = 0; i < len; i++) { - TermState termState = termContext.get(i); + TermState termState = termContext.get(leaves.get(i)); if (termState == null) { continue; } @@ -210,7 +210,7 @@ private TermContext adjustTTF(IndexReaderContext readerContext, TermContext term return newTermContext; } - private static TermContext adjustDF(IndexReaderContext readerContext, TermContext ctx, int newDocFreq) { + private static TermStates adjustDF(IndexReaderContext readerContext, TermStates ctx, int newDocFreq) throws IOException { assert ctx.wasBuiltFor(readerContext); // Use a value of ttf that is consistent with the doc freq (ie. gte) long newTTF; @@ -226,9 +226,9 @@ private static TermContext adjustDF(IndexReaderContext readerContext, TermContex } else { len = leaves.size(); } - TermContext newCtx = new TermContext(readerContext); + TermStates newCtx = new TermStates(readerContext); for (int i = 0; i < len; ++i) { - TermState termState = ctx.get(i); + TermState termState = ctx.get(leaves.get(i)); if (termState == null) { continue; } @@ -299,7 +299,7 @@ public int hashCode() { public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) { return new BlendedTermQuery(terms, boosts) { @Override - protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) { + protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { BooleanQuery.Builder highBuilder = new BooleanQuery.Builder(); BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder(); for (int i = 0; i < terms.length; i++) { @@ -342,7 +342,7 @@ public static BlendedTermQuery dismaxBlendedQuery(Term[] terms, final float tieB public static BlendedTermQuery dismaxBlendedQuery(Term[] terms, final float[] boosts, final float tieBreakerMultiplier) { return new BlendedTermQuery(terms, boosts) { @Override - protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) { + protected Query topLevelQuery(Term[] terms, TermStates[] ctx, int[] docFreqs, int maxDoc) { List queries = new ArrayList<>(ctx.length); for (int i = 0; i < terms.length; i++) { Query query = new TermQuery(terms[i], ctx[i]); diff --git a/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java index 0fed8316a0564..b9a001b6e7370 100644 --- a/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java +++ 
b/server/src/main/java/org/apache/lucene/queries/MinDocQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; @@ -76,7 +77,7 @@ public Query rewrite(IndexReader reader) throws IOException { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { if (readerId == null) { throw new IllegalStateException("Rewrite first"); } else if (Objects.equals(searcher.getIndexReader().getContext().id(), readerId) == false) { diff --git a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java index 5da0e618752e2..2c436f0227222 100644 --- a/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/SearchAfterSortedDocQuery.java @@ -23,16 +23,17 @@ import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.EarlyTerminatingSortingCollector; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; +import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; import java.util.Arrays; @@ -53,7 +54,7 @@ public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) { throw new IllegalArgumentException("after doc has " + after.fields.length + " value(s) but sort has " + sort.getSort().length + "."); } - this.sort = sort; + this.sort = Objects.requireNonNull(sort); this.after = after; int numFields = sort.getSort().length; this.fieldComparators = new FieldComparator[numFields]; @@ -70,12 +71,12 @@ public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, 1.0f) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { Sort segmentSort = context.reader().getMetaData().getSort(); - if (EarlyTerminatingSortingCollector.canEarlyTerminate(sort, segmentSort) == false) { + if (segmentSort == null || Lucene.canEarlyTerminate(sort, segmentSort) == false) { throw new IOException("search sort :[" + sort.getSort() + "] does not match the index sort:[" + segmentSort + "]"); } final int afterDoc = after.doc - context.docBase; diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index c5362cbf85812..4dba67abdeb9a 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ 
b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.PriorityQueue; import java.util.ArrayList; @@ -40,9 +41,9 @@ public final class CollapseTopFieldDocs extends TopFieldDocs { /** The collapse value for each top doc */ public final Object[] collapseValues; - public CollapseTopFieldDocs(String field, long totalHits, ScoreDoc[] scoreDocs, - SortField[] sortFields, Object[] values, float maxScore) { - super(totalHits, scoreDocs, sortFields, maxScore); + public CollapseTopFieldDocs(String field, TotalHits totalHits, ScoreDoc[] scoreDocs, + SortField[] sortFields, Object[] values) { + super(totalHits, scoreDocs, sortFields); this.field = field; this.collapseValues = values; } @@ -172,23 +173,23 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, long totalHitCount = 0; int availHitCount = 0; - float maxScore = Float.MIN_VALUE; + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; for(int shardIDX=0;shardIDX 0) { availHitCount += shard.scoreDocs.length; queue.add(new ShardRef(shardIDX, setShardIndex == false)); - maxScore = Math.max(maxScore, shard.getMaxScore()); } } - if (availHitCount == 0) { - maxScore = Float.NaN; - } - final ScoreDoc[] hits; final Object[] values; if (availHitCount <= start) { @@ -237,6 +238,7 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, hits = hitList.toArray(new ScoreDoc[0]); values = collapseList.toArray(new Object[0]); } - return new CollapseTopFieldDocs(collapseField, totalHitCount, hits, sort.getSort(), values, maxScore); + TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation); + return new CollapseTopFieldDocs(collapseField, totalHits, hits, sort.getSort(), values); } } diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index fedda3ead596b..7f36074d1459b 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -20,9 +20,11 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.TotalHits; import java.io.IOException; import java.util.Collection; @@ -34,6 +36,9 @@ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. + * + * TODO: If the sort is based on score we should propagate the minimum competitive score when orderedGroups is full. + * This is safe for collapsing since the group sort is the same as the query sort.
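 * [editor's note] A brief usage sketch (hypothetical caller) of the Lucene 8 result shape
 * introduced by this hunk; {@code TopDocs#totalHits} is now a {@code TotalHits} object rather than a long:
 * <pre>
 *   CollapseTopFieldDocs docs = collector.getTopDocs();
 *   long count = docs.totalHits.value;                  // the (possibly lower-bound) hit count
 *   TotalHits.Relation rel = docs.totalHits.relation;   // EQUAL_TO or GREATER_THAN_OR_EQUAL_TO
 * </pre>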
*/ public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector<T> { protected final String collapseField; @@ -42,32 +47,23 @@ public final class CollapsingTopDocsCollec protected Scorer scorer; private int totalHitCount; - private float maxScore; - private final boolean trackMaxScore; - CollapsingTopDocsCollector(GroupSelector<T> groupSelector, String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + CollapsingTopDocsCollector(GroupSelector<T> groupSelector, String collapseField, Sort sort, int topN) { super(groupSelector, sort, topN); this.collapseField = collapseField; - this.trackMaxScore = trackMaxScore; - if (trackMaxScore) { - maxScore = Float.NEGATIVE_INFINITY; - } else { - maxScore = Float.NaN; - } this.sort = sort; } /** - * Transform {@link FirstPassGroupingCollector#getTopGroups(int, boolean)} output in + * Transform {@link FirstPassGroupingCollector#getTopGroups(int)} output into * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can get the final top docs at the end * of the first pass. */ public CollapseTopFieldDocs getTopDocs() throws IOException { - Collection<SearchGroup<T>> groups = super.getTopGroups(0, true); + Collection<SearchGroup<T>> groups = super.getTopGroups(0); if (groups == null) { - return new CollapseTopFieldDocs(collapseField, totalHitCount, new ScoreDoc[0], - sort.getSort(), new Object[0], Float.NaN); + TotalHits totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO); + return new CollapseTopFieldDocs(collapseField, totalHits, new ScoreDoc[0], sort.getSort(), new Object[0]); } FieldDoc[] docs = new FieldDoc[groups.size()]; Object[] collapseValues = new Object[groups.size()]; @@ -92,16 +88,17 @@ public CollapseTopFieldDocs getTopDocs() throws IOException { collapseValues[pos] = group.groupValue; pos++; } - return new CollapseTopFieldDocs(collapseField, totalHitCount, docs, sort.getSort(), - collapseValues, maxScore); + TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO); + return new CollapseTopFieldDocs(collapseField, totalHits, docs, sort.getSort(), collapseValues); } @Override - public boolean needsScores() { - if (super.needsScores() == false) { - return trackMaxScore; + public ScoreMode scoreMode() { + if (super.scoreMode().needsScores()) { + return ScoreMode.COMPLETE; + } else { + return ScoreMode.COMPLETE_NO_SCORES; } - return true; } @Override @@ -113,9 +110,6 @@ public void setScorer(Scorer scorer) throws IOException { @Override public void collect(int doc) throws IOException { super.collect(doc); - if (trackMaxScore) { - maxScore = Math.max(maxScore, scorer.score()); - } totalHitCount++; } @@ -134,9 +128,9 @@ public void collect(int doc) throws IOException { * @param topN How many top groups to keep. */ public static CollapsingTopDocsCollector<?> createNumeric(String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + int topN) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseField), - collapseField, sort, topN, trackMaxScore); + collapseField, sort, topN); } /** @@ -153,8 +147,8 @@ public static CollapsingTopDocsCollector createNumeric(String collapseField, * @param topN How many top groups to keep.
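 * [editor's note] For example (hypothetical field name, using the post-change signature):
 * <pre>
 *   CollapsingTopDocsCollector&lt;?&gt; collector =
 *       CollapsingTopDocsCollector.createKeyword("category", Sort.RELEVANCE, 10);
 *   searcher.search(query, collector);
 *   CollapseTopFieldDocs top = collector.getTopDocs();
 * </pre>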
*/ public static CollapsingTopDocsCollector createKeyword(String collapseField, Sort sort, - int topN, boolean trackMaxScore) { + int topN) { return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseField), - collapseField, sort, topN, trackMaxScore); + collapseField, sort, topN); } } diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java index 45ee7becc983e..d9bf9613cba07 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighter.java @@ -48,6 +48,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; /** * Subclass of the {@link UnifiedHighlighter} that works for a single field in a single document. @@ -136,15 +137,16 @@ protected PassageFormatter getFormatter(String field) { @Override protected FieldHighlighter getFieldHighlighter(String field, Query query, Set allTerms, int maxPassages) { - BytesRef[] terms = filterExtractedTerms(getFieldMatcher(field), allTerms); + Predicate fieldMatcher = getFieldMatcher(field); + BytesRef[] terms = filterExtractedTerms(fieldMatcher, allTerms); Set highlightFlags = getFlags(field); PhraseHelper phraseHelper = getPhraseHelper(field, query, highlightFlags); CharacterRunAutomaton[] automata = getAutomata(field, query, highlightFlags); OffsetSource offsetSource = getOptimizedOffsetSource(field, terms, phraseHelper, automata); BreakIterator breakIterator = new SplittingBreakIterator(getBreakIterator(field), UnifiedHighlighter.MULTIVAL_SEP_CHAR); - FieldOffsetStrategy strategy = - getOffsetStrategy(offsetSource, field, terms, phraseHelper, automata, highlightFlags); + UHComponents components = new UHComponents(field, fieldMatcher, query, terms, phraseHelper, automata, highlightFlags); + FieldOffsetStrategy strategy = getOffsetStrategy(offsetSource, components); return new CustomFieldHighlighter(field, strategy, breakIteratorLocale, breakIterator, getScorer(field), maxPassages, (noMatchSize > 0 ? 
1 : 0), getFormatter(field), noMatchSize, fieldValue); } diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 6b670953ecbf0..16073abfc0087 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.queries.BlendedTermQuery; -import org.apache.lucene.queries.BoostingQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; @@ -74,12 +73,11 @@ void flatten(Query sourceQuery, IndexReader reader, Collection flatQuerie } else if (sourceQuery instanceof BlendedTermQuery) { final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery; flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost); - } else if (sourceQuery instanceof BoostingQuery) { - BoostingQuery boostingQuery = (BoostingQuery) sourceQuery; - //flatten positive query with query boost - flatten(boostingQuery.getMatch(), reader, flatQueries, boost); - //flatten negative query with negative boost - flatten(boostingQuery.getContext(), reader, flatQueries, boostingQuery.getBoost()); + } else if (sourceQuery instanceof org.apache.lucene.queries.function.FunctionScoreQuery) { + org.apache.lucene.queries.function.FunctionScoreQuery funcScoreQuery = + (org.apache.lucene.queries.function.FunctionScoreQuery) sourceQuery; + //flatten query with query boost + flatten(funcScoreQuery.getWrappedQuery(), reader, flatQueries, boost); } else if (sourceQuery instanceof SynonymQuery) { // SynonymQuery should be handled by the parent class directly. // This statement should be removed when https://issues.apache.org/jira/browse/LUCENE-7484 is merged. diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 65dc1e2d23131..01738930b4bcf 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -105,7 +105,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version CURRENT = V_7_0_0_alpha1; static { diff --git a/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java b/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java new file mode 100644 index 0000000000000..071cd92330496 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/MaxScoreCollector.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.SimpleCollector; + +import java.io.IOException; + +/** + * A collector that computes the maximum score. + */ +public class MaxScoreCollector extends SimpleCollector { + + private Scorer scorer; + private float maxScore = Float.NEGATIVE_INFINITY; + private boolean hasHits = false; + + @Override + public void setScorer(Scorer scorer) { + this.scorer = scorer; + } + + @Override + public ScoreMode scoreMode() { + // Could be TOP_SCORES but it is always used in a MultiCollector anyway, so this saves some wrapping. + return ScoreMode.COMPLETE; + } + + @Override + public void collect(int doc) throws IOException { + hasHits = true; + maxScore = Math.max(maxScore, scorer.score()); + } + + /** + * Get the maximum score. This returns {@link Float#NaN} if no hits were + * collected. + */ + public float getMaxScore() { + return hasHits ? maxScore : Float.NaN; + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index fb450b2ce8359..9b4d232f23ca6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -31,9 +31,12 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -94,14 +97,15 @@ public AggregatedDfs aggregateDfs(Collection results) { assert terms.length == stats.length; for (int i = 0; i < terms.length; i++) { assert terms[i] != null; + if (stats[i] == null) { + continue; + } TermStatistics existing = termStatistics.get(terms[i]); if (existing != null) { assert terms[i].bytes().equals(existing.term()); - // totalTermFrequency is an optional statistic we need to check if either one or both - // are set to -1 which means not present and then set it globally to -1 termStatistics.put(terms[i], new TermStatistics(existing.term(), - existing.docFreq() + stats[i].docFreq(), - optionalSum(existing.totalTermFreq(), stats[i].totalTermFreq()))); + existing.docFreq() + stats[i].docFreq(), + existing.totalTermFreq() + stats[i].totalTermFreq())); } else { termStatistics.put(terms[i], stats[i]); } @@ -115,14 +119,17 @@ public AggregatedDfs aggregateDfs(Collection results) { if (keys[i] != null) { String key = (String) keys[i]; CollectionStatistics value = (CollectionStatistics) values[i]; + if (value == null) { + 
continue; + } assert key != null; CollectionStatistics existing = fieldStatistics.get(key); if (existing != null) { - CollectionStatistics merged = new CollectionStatistics( - key, existing.maxDoc() + value.maxDoc(), - optionalSum(existing.docCount(), value.docCount()), - optionalSum(existing.sumTotalTermFreq(), value.sumTotalTermFreq()), - optionalSum(existing.sumDocFreq(), value.sumDocFreq()) + CollectionStatistics merged = new CollectionStatistics(key, + existing.maxDoc() + value.maxDoc(), + existing.docCount() + value.docCount(), + existing.sumTotalTermFreq() + value.sumTotalTermFreq(), + existing.sumDocFreq() + value.sumDocFreq() ); fieldStatistics.put(key, merged); } else { @@ -135,10 +142,6 @@ public AggregatedDfs aggregateDfs(Collection results) { return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc); } - private static long optionalSum(long left, long right) { - return Math.min(left, right) == -1 ? -1 : left + right; - } - /** * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the @@ -156,7 +159,7 @@ private static long optionalSum(long left, long right) { * @param size the number of hits to return from the merged top docs */ public SortedTopDocs sortDocs(boolean ignoreFrom, Collection results, - final Collection bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) { + final Collection bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) { if (results.isEmpty()) { return SortedTopDocs.EMPTY; } @@ -169,12 +172,12 @@ public SortedTopDocs sortDocs(boolean ignoreFrom, Collection 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet - setShardIndex(td, queryResult.getShardIndex()); - topDocs.add(td); + if (td.topDocs.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet + setShardIndex(td.topDocs, queryResult.getShardIndex()); + topDocs.add(td.topDocs); } } if (queryResult.hasSuggestHits()) { @@ -387,7 +390,9 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr assert index < fetchResult.hits().getHits().length : "not enough hits fetched. 
index [" + index + "] length: " + fetchResult.hits().getHits().length; SearchHit searchHit = fetchResult.hits().getHits()[index]; - searchHit.score(shardDoc.score); + if (sorted == false) { + searchHit.score(shardDoc.score); + } searchHit.shard(fetchResult.getSearchShardTarget()); if (sorted) { FieldDoc fieldDoc = (FieldDoc) shardDoc; @@ -683,10 +688,10 @@ private synchronized void consumeInternal(QuerySearchResult querySearchResult) { aggsBuffer[i] = (InternalAggregations) querySearchResult.consumeAggs(); } if (hasTopDocs) { - final TopDocs topDocs = querySearchResult.consumeTopDocs(); // can't be null + final TopDocsAndMaxScore topDocs = querySearchResult.consumeTopDocs(); // can't be null topDocsStats.add(topDocs); - SearchPhaseController.setShardIndex(topDocs, querySearchResult.getShardIndex()); - topDocsBuffer[i] = topDocs; + SearchPhaseController.setShardIndex(topDocs.topDocs, querySearchResult.getShardIndex()); + topDocsBuffer[i] = topDocs.topDocs; } } @@ -743,6 +748,7 @@ public ReducedQueryPhase reduce() { static final class TopDocsStats { final boolean trackTotalHits; long totalHits; + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; long fetchHits; float maxScore = Float.NEGATIVE_INFINITY; @@ -755,13 +761,16 @@ static final class TopDocsStats { this.totalHits = trackTotalHits ? 0 : -1; } - void add(TopDocs topDocs) { + void add(TopDocsAndMaxScore topDocs) { if (trackTotalHits) { - totalHits += topDocs.totalHits; + totalHits += topDocs.topDocs.totalHits.value; + if (topDocs.topDocs.totalHits.relation == Relation.GREATER_THAN_OR_EQUAL_TO) { + totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + } } - fetchHits += topDocs.scoreDocs.length; - if (!Float.isNaN(topDocs.getMaxScore())) { - maxScore = Math.max(maxScore, topDocs.getMaxScore()); + fetchHits += topDocs.topDocs.scoreDocs.length; + if (!Float.isNaN(topDocs.maxScore)) { + maxScore = Math.max(maxScore, topDocs.maxScore); } } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 031a537c37b34..7d13cff2ebd09 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -22,7 +22,9 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.apache.lucene.index.Fields; +import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SlowImpactsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.BoostAttribute; @@ -348,6 +350,11 @@ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { : null, hasPayloads ? 
payloads : null, freq); } + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return new SlowImpactsEnum(postings(null, flags)); + } + }; } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index 8a54406c1f9cb..9aca80b533f66 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -112,13 +112,17 @@ void setFields(Fields termVectorsByField, Set selectedFields, EnumSet= -1); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 1c1e56878932d..6016c7cb4c45f 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -50,13 +50,16 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; +import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; @@ -73,6 +76,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -81,6 +85,7 @@ import java.io.IOException; import java.text.ParseException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -89,7 +94,7 @@ public class Lucene { public static final String LATEST_DOC_VALUES_FORMAT = "Lucene70"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; - public static final String LATEST_CODEC = "Lucene70"; + public static final String LATEST_CODEC = "Lucene80"; static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); @@ -105,7 +110,7 @@ public class Lucene { public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0]; - public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(0, EMPTY_SCORE_DOCS, Float.NaN); + public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), EMPTY_SCORE_DOCS); public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { if (version == null) { @@ -251,7 +256,7 @@ protected Object doBody(String segmentFileName) throws IOException { * Check whether there are one or more documents matching the provided query.
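 * [editor's note] The hunk below shows this PR's recurring migration: Lucene 8 removes
 * {@code IndexSearcher#createNormalizedWeight}, so callers rewrite the query themselves and
 * pass an explicit {@code ScoreMode} and boost, e.g. (sketch):
 * <pre>
 *   // Lucene 7: searcher.createNormalizedWeight(query, false)
 *   Weight w = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
 * </pre>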
*/ public static boolean exists(IndexSearcher searcher, Query query) throws IOException { - final Weight weight = searcher.createNormalizedWeight(query, false); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); // the scorer API should be more efficient at stopping after the first // match than the bulk scorer API for (LeafReaderContext context : searcher.getIndexReader().leaves()) { @@ -270,19 +275,28 @@ public static boolean exists(IndexSearcher searcher, Query query) throws IOExcep return false; } - public static TopDocs readTopDocs(StreamInput in) throws IOException { + private static TotalHits readTotalHits(StreamInput in) throws IOException { + long totalHits = in.readVLong(); + TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; + if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + totalHitsRelation = in.readEnum(TotalHits.Relation.class); + } + return new TotalHits(totalHits, totalHitsRelation); + } + + public static TopDocsAndMaxScore readTopDocs(StreamInput in) throws IOException { byte type = in.readByte(); if (type == 0) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); ScoreDoc[] scoreDocs = new ScoreDoc[in.readVInt()]; for (int i = 0; i < scoreDocs.length; i++) { scoreDocs[i] = new ScoreDoc(in.readVInt(), in.readFloat()); } - return new TopDocs(totalHits, scoreDocs, maxScore); + return new TopDocsAndMaxScore(new TopDocs(totalHits, scoreDocs), maxScore); } else if (type == 1) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); SortField[] fields = new SortField[in.readVInt()]; @@ -294,9 +308,9 @@ public static TopDocs readTopDocs(StreamInput in) throws IOException { for (int i = 0; i < fieldDocs.length; i++) { fieldDocs[i] = readFieldDoc(in); } - return new TopFieldDocs(totalHits, fieldDocs, fields, maxScore); + return new TopDocsAndMaxScore(new TopFieldDocs(totalHits, fieldDocs, fields), maxScore); } else if (type == 2) { - long totalHits = in.readVLong(); + TotalHits totalHits = readTotalHits(in); float maxScore = in.readFloat(); String field = in.readString(); @@ -311,7 +325,7 @@ public static TopDocs readTopDocs(StreamInput in) throws IOException { fieldDocs[i] = readFieldDoc(in); collapseValues[i] = readSortValue(in); } - return new CollapseTopFieldDocs(field, totalHits, fieldDocs, fields, collapseValues, maxScore); + return new TopDocsAndMaxScore(new CollapseTopFieldDocs(field, totalHits, fieldDocs, fields, collapseValues), maxScore); } else { throw new IllegalStateException("Unknown type " + type); } @@ -381,13 +395,22 @@ public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { private static final Class GEO_DISTANCE_SORT_TYPE_CLASS = LatLonDocValuesField.newDistanceSort("some_geo_field", 0, 0).getClass(); - public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOException { - if (topDocs instanceof CollapseTopFieldDocs) { + private static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException { + out.writeVLong(totalHits.value); + if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + out.writeEnum(totalHits.relation); + } else if (totalHits.relation != TotalHits.Relation.EQUAL_TO) { + throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0"); + } + } + + public static void writeTopDocs(StreamOutput out, 
TopDocsAndMaxScore topDocs) throws IOException { + if (topDocs.topDocs instanceof CollapseTopFieldDocs) { out.writeByte((byte) 2); - CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs; + CollapseTopFieldDocs collapseDocs = (CollapseTopFieldDocs) topDocs.topDocs; - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); out.writeString(collapseDocs.field); @@ -396,35 +419,35 @@ public static void writeTopDocs(StreamOutput out, TopDocs topDocs) throws IOExce writeSortField(out, sortField); } - out.writeVInt(topDocs.scoreDocs.length); - for (int i = 0; i < topDocs.scoreDocs.length; i++) { + out.writeVInt(topDocs.topDocs.scoreDocs.length); + for (int i = 0; i < topDocs.topDocs.scoreDocs.length; i++) { ScoreDoc doc = collapseDocs.scoreDocs[i]; writeFieldDoc(out, (FieldDoc) doc); writeSortValue(out, collapseDocs.collapseValues[i]); } - } else if (topDocs instanceof TopFieldDocs) { + } else if (topDocs.topDocs instanceof TopFieldDocs) { out.writeByte((byte) 1); - TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs; + TopFieldDocs topFieldDocs = (TopFieldDocs) topDocs.topDocs; - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); out.writeVInt(topFieldDocs.fields.length); for (SortField sortField : topFieldDocs.fields) { writeSortField(out, sortField); } - out.writeVInt(topDocs.scoreDocs.length); + out.writeVInt(topDocs.topDocs.scoreDocs.length); for (ScoreDoc doc : topFieldDocs.scoreDocs) { writeFieldDoc(out, (FieldDoc) doc); } } else { out.writeByte((byte) 0); - out.writeVLong(topDocs.totalHits); - out.writeFloat(topDocs.getMaxScore()); + writeTotalHits(out, topDocs.topDocs.totalHits); + out.writeFloat(topDocs.maxScore); - out.writeVInt(topDocs.scoreDocs.length); - for (ScoreDoc doc : topDocs.scoreDocs) { + out.writeVInt(topDocs.topDocs.scoreDocs.length); + for (ScoreDoc doc : topDocs.topDocs.scoreDocs) { writeScoreDoc(out, doc); } } @@ -578,6 +601,24 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws out.writeBoolean(sortField.getReverse()); } + private static Number readExplanationValue(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + final int numberType = in.readByte(); + switch (numberType) { + case 0: + return in.readFloat(); + case 1: + return in.readDouble(); + case 2: + return in.readZLong(); + default: + throw new IOException("Unexpected number type: " + numberType); + } + } else { + return in.readFloat(); + } + } + public static Explanation readExplanation(StreamInput in) throws IOException { boolean match = in.readBoolean(); String description = in.readString(); @@ -586,12 +627,29 @@ public static Explanation readExplanation(StreamInput in) throws IOException { subExplanations[i] = readExplanation(in); } if (match) { - return Explanation.match(in.readFloat(), description, subExplanations); + return Explanation.match(readExplanationValue(in), description, subExplanations); } else { return Explanation.noMatch(description, subExplanations); } } + private static void writeExplanationValue(StreamOutput out, Number value) throws IOException { + if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + if (value instanceof Float) { + out.writeByte((byte) 0); + out.writeFloat(value.floatValue()); + } else if (value instanceof Double) { + 
out.writeByte((byte) 1); + out.writeDouble(value.doubleValue()); + } else { + out.writeByte((byte) 2); + out.writeZLong(value.longValue()); + } + } else { + out.writeFloat(value.floatValue()); + } + } + public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException { out.writeBoolean(explanation.isMatch()); out.writeString(explanation.getDescription()); @@ -601,7 +659,7 @@ public static void writeExplanation(StreamOutput out, Explanation explanation) t writeExplanation(out, subExp); } if (explanation.isMatch()) { - out.writeFloat(explanation.getValue()); + writeExplanationValue(out, explanation.getValue()); } } @@ -705,6 +763,10 @@ public int docID() { public DocIdSetIterator iterator() { throw new IllegalStateException(message); } + @Override + public float getMaxScore(int upTo) throws IOException { + throw new IllegalStateException(message); + } }; } @@ -836,6 +898,19 @@ public int length() { }; } + /** + * Whether a query sorted by {@code searchSort} can be early-terminated if the index is sorted by {@code indexSort}. + */ + public static boolean canEarlyTerminate(Sort searchSort, Sort indexSort) { + final SortField[] fields1 = searchSort.getSort(); + final SortField[] fields2 = indexSort.getSort(); + // early termination is possible if fields1 is a prefix of fields2 + if (fields1.length > fields2.length) { + return false; + } + return Arrays.asList(fields1).equals(Arrays.asList(fields2).subList(0, fields1.length)); + } + /** * Wraps a directory reader to make all documents live except those that were rolled back * or hard-deleted due to non-aborting exceptions during indexing. diff --git a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java index 2552309450ba4..76b59887fb946 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/MinimumScoreCollector.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.ScoreCachingWrappingScorer; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SimpleCollector; @@ -63,7 +64,7 @@ public void doSetNextReader(LeafReaderContext context) throws IOException { } @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 0ff0008a43032..67f1495c79c2a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.lucene.index; +import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; @@ -28,6 +29,7 @@ import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; @@ -80,7 +82,7 @@ public 
FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @ } else { final IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); - weight = searcher.createNormalizedWeight(filter, false); + weight = searcher.createWeight(searcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } for (LeafReaderContext context : leaves) { Terms terms = context.reader().terms(field); @@ -207,6 +209,11 @@ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } + @Override + public ImpactsEnum impacts(int flags) throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); + } + @Override public BytesRef next() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index e9db2928ca724..f1e55d76296ca 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.Collector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -56,7 +57,7 @@ public void collect(int doc) throws IOException { } @Override - public boolean needsScores() { - return collector.needsScores(); + public ScoreMode scoreMode() { + return collector.scoreMode(); } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java similarity index 54% rename from server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java rename to server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java index 2339815b5582e..7cc1f9142de4f 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/StandardTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/TopDocsAndMaxScore.java @@ -17,23 +17,22 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.common.lucene.search; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.standard.StandardFilter; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.Environment; -import org.elasticsearch.index.IndexSettings; +import org.apache.lucene.search.TopDocs; +/** + * Wrapper around a {@link TopDocs} instance and the maximum score. + */ +// TODO: Remove this class when https://github.com/elastic/elasticsearch/issues/32981 is addressed. 
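// [editor's note] A minimal usage sketch (hypothetical call site, assuming the search also
// ran the MaxScoreCollector added earlier in this PR) of the wrapper declared just below;
// Lucene 8's TopDocs no longer carries a max score, so the two travel together explicitly:
//
//   TopDocs topDocs = topDocsCollector.topDocs();
//   float maxScore = maxScoreCollector.getMaxScore(); // Float.NaN when no hits were collected
//   TopDocsAndMaxScore result = new TopDocsAndMaxScore(topDocs, maxScore);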
+public final class TopDocsAndMaxScore { -public class StandardTokenFilterFactory extends AbstractTokenFilterFactory { + public final TopDocs topDocs; + public float maxScore; - public StandardTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - super(indexSettings, name, settings); + public TopDocsAndMaxScore(TopDocs topDocs, float maxScore) { + this.topDocs = topDocs; + this.maxScore = maxScore; } - @Override - public TokenStream create(TokenStream tokenStream) { - return new StandardFilter(tokenStream); - } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java index 399f3d7a2e613..6d8a436c0b202 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java @@ -38,10 +38,10 @@ public float combine(double queryScore, double funcScore, double maxBoost) { public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); Explanation minExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, boostExpl); - return Explanation.match(queryExpl.getValue() * minExpl.getValue(), + return Explanation.match(queryExpl.getValue().floatValue() * minExpl.getValue().floatValue(), "function score, product of:", queryExpl, minExpl); } }, @@ -55,7 +55,7 @@ public float combine(double queryScore, double funcScore, double maxBoost) { public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); return Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, boostExpl); } @@ -69,9 +69,9 @@ public float combine(double queryScore, double funcScore, double maxBoost) { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { - Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); - return Explanation.match(Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), "sum of", + return Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost) + queryExpl.getValue().floatValue(), "sum of", queryExpl, minExpl); } @@ -84,10 +84,10 @@ public float combine(double queryScore, double funcScore, double maxBoost) { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { - Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - (float) ((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of", + (float) ((Math.min(funcExpl.getValue().floatValue(), maxBoost) + queryExpl.getValue().floatValue()) / 2.0), "avg of", queryExpl, minExpl); } @@ -101,10 +101,10 @@ public float combine(double 
queryScore, double funcScore, double maxBoost) { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation innerMinExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), "min of:", + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - Math.min(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "min of", + Math.min(Math.min(funcExpl.getValue().floatValue(), maxBoost), queryExpl.getValue().floatValue()), "min of", queryExpl, innerMinExpl); } @@ -118,10 +118,10 @@ public float combine(double queryScore, double funcScore, double maxBoost) { @Override public Explanation explain(Explanation queryExpl, Explanation funcExpl, float maxBoost) { Explanation innerMinExpl = Explanation.match( - Math.min(funcExpl.getValue(), maxBoost), "min of:", + Math.min(funcExpl.getValue().floatValue(), maxBoost), "min of:", funcExpl, Explanation.match(maxBoost, "maxBoost")); return Explanation.match( - Math.max(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "max of:", + Math.max(Math.min(funcExpl.getValue().floatValue(), maxBoost), queryExpl.getValue().floatValue()), "max of:", queryExpl, innerMinExpl); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index c49487cfb7eb4..fb5a82bc098e2 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -90,7 +90,7 @@ public double score(int docId, float subQueryScore) throws IOException { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { String modifierStr = modifier != null ? modifier.toString() : ""; String defaultStr = missing != null ? "?:" + missing : ""; - double score = score(docId, subQueryScore.getValue()); + double score = score(docId, subQueryScore.getValue().floatValue()); return Explanation.match( (float) score, String.format(Locale.ROOT, diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index c2263fc201e18..05b74a8b7fe3c 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -212,22 +212,27 @@ public Query rewrite(IndexReader reader) throws IOException { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - if (needsScores == false && minScore == null) { - return subQuery.createWeight(searcher, needsScores, boost); + public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { + if (scoreMode == org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES && minScore == null) { + return subQuery.createWeight(searcher, scoreMode, boost); } - boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE; + org.apache.lucene.search.ScoreMode subQueryScoreMode = combineFunction != CombineFunction.REPLACE + ? 
org.apache.lucene.search.ScoreMode.COMPLETE + : org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES; Weight[] filterWeights = new Weight[functions.length]; for (int i = 0; i < functions.length; ++i) { - subQueryNeedsScores |= functions[i].needsScores(); + if (functions[i].needsScores()) { + subQueryScoreMode = org.apache.lucene.search.ScoreMode.COMPLETE; + } if (functions[i] instanceof FilterScoreFunction) { Query filter = ((FilterScoreFunction) functions[i]).filter; - filterWeights[i] = searcher.createNormalizedWeight(filter, false); + filterWeights[i] = searcher.createWeight(searcher.rewrite(filter), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); } } - Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores, boost); - return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores); + Weight subQueryWeight = subQuery.createWeight(searcher, subQueryScoreMode, boost); + return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryScoreMode.needsScores()); } class CustomBoostFactorWeight extends Weight { @@ -299,10 +304,9 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio ScoreFunction function = functions[i]; Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, expl); if (function instanceof FilterScoreFunction) { - double factor = functionExplanation.getValue(); - float sc = (float) factor; + float factor = functionExplanation.getValue().floatValue(); Query filterQuery = ((FilterScoreFunction) function).filter; - Explanation filterExplanation = Explanation.match(sc, "function score, product of:", + Explanation filterExplanation = Explanation.match(factor, "function score, product of:", Explanation.match(1.0f, "match filter: " + filterQuery.toString()), functionExplanation); functionsExplanations.add(filterExplanation); } else { @@ -319,14 +323,14 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio FunctionFactorScorer scorer = functionScorer(context); int actualDoc = scorer.iterator().advance(doc); assert (actualDoc == doc); - double score = scorer.computeScore(doc, expl.getValue()); + double score = scorer.computeScore(doc, expl.getValue().floatValue()); factorExplanation = Explanation.match( (float) score, "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]", functionsExplanations); } expl = combineFunction.explain(expl, factorExplanation, maxBoost); } - if (minScore != null && minScore > expl.getValue()) { + if (minScore != null && minScore > expl.getValue().floatValue()) { expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl); } return expl; @@ -442,6 +446,11 @@ protected double computeScore(int docId, float subQueryScore) throws IOException } return factor; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; // TODO: what would be a good upper bound? 
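Two Lucene 8 API shifts drive this hunk and recur through the rest of the change: Query#createWeight now takes an explicit ScoreMode instead of a boolean needsScores flag, and IndexSearcher#createNormalizedWeight is gone, so callers rewrite the query themselves before asking for a weight. A minimal sketch of the new calling convention, using only stock Lucene 8 classes (the method name is illustrative, not from this PR):

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreMode;
    import org.apache.lucene.search.Weight;

    class ScoreModeSketch {
        // Lucene 7: Weight w = searcher.createNormalizedWeight(query, /* needsScores = */ false);
        // Lucene 8: rewrite explicitly, then name the score mode and pass the top-level boost.
        static Weight noScoresWeight(IndexSearcher searcher, Query query) throws IOException {
            return searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
        }
    }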
+ } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java index 8e21c1af41aef..5296926e9869d 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/MinScoreScorer.java @@ -59,6 +59,16 @@ public float score() throws IOException { return in.score(); } + @Override + public int advanceShallow(int target) throws IOException { + return in.advanceShallow(target); + } + + @Override + public float getMaxScore(int upTo) throws IOException { + return in.getMaxScore(upTo); + } + @Override public DocIdSetIterator iterator() { return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator()); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java index a104a416cc6bf..8694b6fa019f1 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java @@ -84,7 +84,7 @@ public double score(int docId, float subQueryScore) throws IOException { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { String field = fieldData == null ? null : fieldData.getFieldName(); return Explanation.match( - (float) score(docId, subQueryScore.getValue()), + (float) score(docId, subQueryScore.getValue().floatValue()), "random score function (seed: " + originalSeed + ", field: " + field + ")"); } }; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 7f8b10349bc7d..bf1ea637a9671 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -54,6 +54,11 @@ public float score() throws IOException { public DocIdSetIterator iterator() { throw new UnsupportedOperationException(); } + + @Override + public float getMaxScore(int upTo) throws IOException { + throw new UnsupportedOperationException(); + } } private final Script sScript; @@ -88,10 +93,10 @@ public Explanation explainScore(int docId, Explanation subQueryScore) throws IOE if (leafScript instanceof ExplainableSearchScript) { leafScript.setDocument(docId); scorer.docid = docId; - scorer.score = subQueryScore.getValue(); + scorer.score = subQueryScore.getValue().floatValue(); exp = ((ExplainableSearchScript) leafScript).explain(subQueryScore); } else { - double score = score(docId, subQueryScore.getValue()); + double score = score(docId, subQueryScore.getValue().floatValue()); String explanation = "script score function, computed with script:\"" + sScript + "\""; if (sScript.getParams() != null) { explanation += " and parameters: \n" + sScript.getParams().toString(); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index 7d96426e8695e..87f6b21e9da2b 100644 --- 
a/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -60,7 +60,7 @@ public double score(int docId, float subQueryScore) throws IOException { public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { Explanation functionExplanation = leafFunction.explainScore(docId, subQueryScore); return Explanation.match( - functionExplanation.getValue() * (float) getWeight(), "product of:", + functionExplanation.getValue().floatValue() * (float) getWeight(), "product of:", functionExplanation, explainWeight()); } }; diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index d8af8d3ead1f4..52439f7c89d14 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; @@ -437,7 +437,7 @@ void validate( final String key, final Settings settings, final boolean validateDependencies, final boolean validateInternalOrPrivateIndex) { Setting setting = getRaw(key); if (setting == null) { - LevensteinDistance ld = new LevensteinDistance(); + LevenshteinDistance ld = new LevenshteinDistance(); List<Tuple<Float, String>> scoredKeys = new ArrayList<>(); for (String k : this.keySettings.keySet()) { float distance = ld.getDistance(key, k); diff --git a/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java b/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java new file mode 100644 index 0000000000000..eb15ee130521e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/CachedSupplier.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.util; + +import java.util.function.Supplier; + +/** + * A {@link Supplier} that caches its return value. This may be useful to make + * a {@link Supplier} idempotent or for performance reasons if always returning + * the same instance is acceptable.
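The .floatValue() unboxing at the top of this hunk shows up all over the PR for the same reason: Explanation#getValue() returns a Number in Lucene 8 rather than a float, so callers that still compute with floats must unbox explicitly. A tiny self-contained illustration (values made up):

    import org.apache.lucene.search.Explanation;

    class ExplanationValueSketch {
        public static void main(String[] args) {
            Explanation expl = Explanation.match(1.5f, "demo");
            // getValue() is a Number in Lucene 8; unbox before doing float math.
            float value = expl.getValue().floatValue();
            System.out.println(value); // 1.5
        }
    }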
+ */ +public final class CachedSupplier<T> implements Supplier<T> { + + private Supplier<T> supplier; + private T result; + private boolean resultSet; + + public CachedSupplier(Supplier<T> supplier) { + this.supplier = supplier; + } + + @Override + public synchronized T get() { + if (resultSet == false) { + result = supplier.get(); + resultSet = true; + } + return result; + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java index f3559a650704f..d78d914a5eca6 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/StopAnalyzerProvider.java @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -32,7 +33,7 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnalyzer> { CharArraySet stopWords = Analysis.parseStopWords( - env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + env, settings, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java MapBuilder<String, Codec> codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene70Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene70Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene80Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene80Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index bf1e48e7a6b27..dfbbf350dcb47 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -23,7 +23,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; -import org.apache.lucene.codecs.lucene70.Lucene70Codec; +import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -37,8 +37,7 @@ * per index in real time via the mapping API. If no specific postings format is * configured for a specific field the default postings format is used.
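Stepping back to the CachedSupplier introduced above: the wrapped supplier runs at most once, and every later get() returns the cached result. A usage sketch (the expensive computation is invented):

    import java.util.function.Supplier;

    class CachedSupplierSketch {
        public static void main(String[] args) {
            Supplier<double[]> expensive = () -> {
                System.out.println("computed once");
                return new double[1024];
            };
            CachedSupplier<double[]> cached = new CachedSupplier<>(expensive);
            // "computed once" prints a single time; both calls return the same instance.
            System.out.println(cached.get() == cached.get()); // true
        }
    }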
*/ -// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene70Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene80Codec { private final Logger logger; private final MapperService mapperService; diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index 2bca31f3bc88f..a44f8a0f8357b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -104,7 +104,7 @@ final class LuceneChangesSnapshot implements Translog.Snapshot { this.indexSearcher.setQueryCache(null); this.parallelArray = new ParallelArray(searchBatchSize); final TopDocs topDocs = searchOperations(null); - this.totalHits = Math.toIntExact(topDocs.totalHits); + this.totalHits = Math.toIntExact(topDocs.totalHits.value); this.scoreDocs = topDocs.scoreDocs; fillParallelArray(scoreDocs, parallelArray); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index fde97562de8f8..7faed37b2fd36 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; @@ -72,7 +73,7 @@ static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Su builder.add(retainSourceQuerySupplier.get(), BooleanClause.Occur.FILTER); IndexSearcher s = new IndexSearcher(reader); s.setQueryCache(null); - Weight weight = s.createWeight(s.rewrite(builder.build()), false, 1.0f); + Weight weight = s.createWeight(s.rewrite(builder.build()), ScoreMode.COMPLETE_NO_SCORES, 1.0f); Scorer scorer = weight.scorer(reader.getContext()); if (scorer != null) { return new SourcePruningFilterCodecReader(recoverySourceField, reader, BitSet.of(scorer.iterator(), reader.maxDoc())); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index da3dc75f4ef52..6896432bcdd55 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Weight; @@ -155,7 +156,7 @@ public BitSet rootDocs(LeafReaderContext ctx) throws IOException { public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); IndexSearcher indexSearcher = new IndexSearcher(topLevelCtx); - Weight weight = indexSearcher.createNormalizedWeight(innerQuery, false); + Weight 
weight = indexSearcher.createWeight(indexSearcher.rewrite(innerQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(ctx); return s == null ? null : s.iterator(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 663aa7e6f9e10..fa1abe4293948 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; @@ -293,7 +294,7 @@ public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, Le } // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and // therefor is guaranteed to be a live doc. - final Weight nestedWeight = filter.createWeight(sc.searcher(), false, 1f); + final Weight nestedWeight = filter.createWeight(sc.searcher(), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer scorer = nestedWeight.scorer(context); if (scorer == null) { continue; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 29f1cbb721feb..f7bcab21d723d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -296,7 +296,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars); + TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); return new TokenStreamComponents(components.getTokenizer(), filter); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index a5a675e96dc93..162ce2a3fde61 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -25,7 +25,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -216,7 +216,7 @@ public Query rewrite(IndexReader reader) throws IOException { for (BytesRef type : types) { if (uniqueTypes.add(type)) { Term term = new Term(CONTENT_TYPE, type); - TermContext context = TermContext.build(reader.getContext(), term); + TermStates context = TermStates.build(reader.getContext(), term, true); if (context.docFreq() == 0) { // this _type is not present in the reader continue; diff --git a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index 35b0d18b1e88c..f3e6f6c8061e6 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.BoostingQuery; +import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -201,7 +201,7 @@ public String getWriteableName() { protected Query doToQuery(QueryShardContext context) throws IOException { Query positive = positiveQuery.toQuery(context); Query negative = negativeQuery.toQuery(context); - return new BoostingQuery(positive, negative, negativeBoost); + return FunctionScoreQuery.boostByQuery(positive, negative, negativeBoost); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 8d7c0190eb210..d2b432e7c7ca1 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -22,22 +22,26 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.ObjectMapper; @@ -365,9 +369,9 @@ static final class NestedInnerHitSubContext extends InnerHitsContext.InnerHitSub } @Override - public TopDocs[] topDocs(SearchHit[] hits) throws IOException { + public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { Weight innerHitQueryWeight = createInnerHitQueryWeight(); - TopDocs[] result = new TopDocs[hits.length]; + TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length]; for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; Query rawParentFilter; @@ -385,25 +389,38 @@ public TopDocs[] topDocs(SearchHit[] hits) throws IOException { Query childFilter = childObjectMapper.nestedTypeFilter(); BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); Query q = new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId); - Weight weight = context.searcher().createNormalizedWeight(q, false); + Weight weight = 
context.searcher().createWeight(context.searcher().rewrite(q), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); - result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, Float.NaN); + result[i] = new TopDocsAndMaxScore(new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), + TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN); } else { int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); TopDocsCollector<?> topDocsCollector; + MaxScoreCollector maxScoreCollector = null; if (sort() != null) { - topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true); + topDocsCollector = TopFieldCollector.create(sort().sort, topN, Integer.MAX_VALUE); + if (trackScores()) { + maxScoreCollector = new MaxScoreCollector(); + } } else { - topDocsCollector = TopScoreDocCollector.create(topN); + topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE); + maxScoreCollector = new MaxScoreCollector(); } try { - intersect(weight, innerHitQueryWeight, topDocsCollector, ctx); + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } finally { clearReleasables(Lifetime.COLLECTION); } - result[i] = topDocsCollector.topDocs(from(), size()); + + TopDocs td = topDocsCollector.topDocs(from(), size()); + float maxScore = Float.NaN; + if (maxScoreCollector != null) { + maxScore = maxScoreCollector.getMaxScore(); + } + result[i] = new TopDocsAndMaxScore(td, maxScore); } } return result; diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index c08f342d50846..50586aa2522ad 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -169,7 +170,7 @@ public int hashCode() { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java index 637d93212912f..6ea068176b41e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java @@ -20,7 +20,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ -165,7 +165,7 @@ protected void
checkMaxClauseCount(int count) { } @Override - protected void addClause(List<SpanQuery> topLevel, Term term, int docCount, float boost, TermContext states) { + protected void addClause(List<SpanQuery> topLevel, Term term, int docCount, float boost, TermStates states) { SpanTermQuery q = new SpanTermQuery(term, states); topLevel.add(q); } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index 54c25b40501d2..7d6dd4a59cb19 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -554,7 +554,7 @@ public Explanation explainScore(int docId, Explanation subQueryScore) throws IOE } double value = distance.doubleValue(); return Explanation.match( - (float) score(docId, subQueryScore.getValue()), + (float) score(docId, subQueryScore.getValue().floatValue()), "Function for field " + getFieldName() + ":", func.explainFunction(getDistanceString(ctx, docId), value, scale)); } diff --git a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java index 3762b1fffc067..5e6aa3bb7c456 100644 --- a/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/ESToParentBlockJoinQuery.java @@ -75,8 +75,8 @@ public Query rewrite(IndexReader reader) throws IOException { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - return query.createWeight(searcher, needsScores, boost); + public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { + return query.createWeight(searcher, scoreMode, boost); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java index a2e738128e3eb..a6949c0559722 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java @@ -89,7 +89,7 @@ public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOExcep final IndexSearcher innerIndexSearcher = new IndexSearcher(reader); innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache()); innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy()); - innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity(true)); + innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity()); // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point // For example if IndexSearcher#rewrite() is overridden then also IndexSearcher#createNormalizedWeight needs to be overridden // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index e27c68c7570a7..a22193974272c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ 
b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -73,7 +74,7 @@ final class ShardSplittingQuery extends Query { this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null; } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) { return new ConstantScoreWeight(this, boost) { @Override public String toString() { @@ -348,7 +349,7 @@ private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCre final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); - final Weight weight = searcher.createNormalizedWeight(query, false); + final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(context); return s == null ? null : BitSet.of(s.iterator(), context.reader().maxDoc()); }; diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java index aea18c30a6907..7e3efacfa20be 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarity.java @@ -20,19 +20,14 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.SmallFloat; import org.elasticsearch.script.SimilarityScript; import org.elasticsearch.script.SimilarityWeightScript; -import java.io.IOException; - /** * A {@link Similarity} implementation that allows scores to be scripted. */ @@ -65,8 +60,18 @@ public long computeNorm(FieldInvertState state) { return SmallFloat.intToByte4(numTerms); } + /** Compute the part of the score that does not depend on the current document using the init_script. */ + private double computeWeight(Query query, Field field, Term term) { + if (weightScriptFactory == null) { + return 1d; + } + SimilarityWeightScript weightScript = weightScriptFactory.newInstance(); + return weightScript.execute(query, field, term); + } + @Override - public SimWeight computeWeight(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { + public SimScorer scorer(float boost, + CollectionStatistics collectionStats, TermStatistics... 
termStats) { Query query = new Query(boost); long docCount = collectionStats.docCount(); if (docCount == -1) { @@ -77,58 +82,32 @@ public SimWeight computeWeight(float boost, CollectionStatistics collectionStats for (int i = 0; i < termStats.length; ++i) { terms[i] = new Term(termStats[i].docFreq(), termStats[i].totalTermFreq()); } - return new Weight(collectionStats.field(), query, field, terms); - } - - /** Compute the part of the score that does not depend on the current document using the init_script. */ - private double computeWeight(Query query, Field field, Term term) throws IOException { - if (weightScriptFactory == null) { - return 1d; - } - SimilarityWeightScript weightScript = weightScriptFactory.newInstance(); - return weightScript.execute(query, field, term); - } - @Override - public SimScorer simScorer(SimWeight w, LeafReaderContext context) throws IOException { - Weight weight = (Weight) w; - SimScorer[] scorers = new SimScorer[weight.terms.length]; - for (int i = 0; i < weight.terms.length; ++i) { - final Term term = weight.terms[i]; + SimScorer[] scorers = new SimScorer[terms.length]; + for (int i = 0; i < terms.length; ++i) { + final Term term = terms[i]; final SimilarityScript script = scriptFactory.newInstance(); - final NumericDocValues norms = context.reader().getNormValues(weight.fieldName); - final Doc doc = new Doc(norms); - final double scoreWeight = computeWeight(weight.query, weight.field, term); + final Doc doc = new Doc(); + final double scoreWeight = computeWeight(query, field, term); scorers[i] = new SimScorer() { @Override - public float score(int docID, float freq) throws IOException { - doc.docID = docID; + public float score(float freq, long norm) { doc.freq = freq; - return (float) script.execute(scoreWeight, weight.query, weight.field, term, doc); + doc.norm = norm; + return (float) script.execute(scoreWeight, query, field, term, doc); } @Override - public float computeSlopFactor(int distance) { - return 1.0f / (distance + 1); - } - - @Override - public float computePayloadFactor(int doc, int start, int end, BytesRef payload) { - return 1f; - } - - @Override - public Explanation explain(int docID, Explanation freq) throws IOException { - doc.docID = docID; - float score = score(docID, freq.getValue()); + public Explanation explain(Explanation freq, long norm) { + float score = score(freq.getValue().floatValue(), norm); return Explanation.match(score, "score from " + ScriptedSimilarity.this.toString() + " computed from:", Explanation.match((float) scoreWeight, "weight"), - Explanation.match(weight.query.boost, "query.boost"), - Explanation.match(weight.field.docCount, "field.docCount"), - Explanation.match(weight.field.sumDocFreq, "field.sumDocFreq"), - Explanation.match(weight.field.sumTotalTermFreq, "field.sumTotalTermFreq"), + Explanation.match(query.boost, "query.boost"), + Explanation.match(field.docCount, "field.docCount"), + Explanation.match(field.sumDocFreq, "field.sumDocFreq"), + Explanation.match(field.sumTotalTermFreq, "field.sumTotalTermFreq"), Explanation.match(term.docFreq, "term.docFreq"), Explanation.match(term.totalTermFreq, "term.totalTermFreq"), Explanation.match(freq.getValue(), "doc.freq", freq.getDetails()), @@ -143,50 +122,26 @@ public Explanation explain(int docID, Explanation freq) throws IOException { return new SimScorer() { @Override - public float score(int doc, float freq) throws IOException { + public float score(float freq, long norm) { double sum = 0; for (SimScorer scorer : scorers) { - sum += scorer.score(doc, freq); 
+ sum += scorer.score(freq, norm); } return (float) sum; } @Override - public float computeSlopFactor(int distance) { - return 1.0f / (distance + 1); - } - - @Override - public float computePayloadFactor(int doc, int start, int end, BytesRef payload) { - return 1f; - } - - @Override - public Explanation explain(int doc, Explanation freq) throws IOException { + public Explanation explain(Explanation freq, long norm) { Explanation[] subs = new Explanation[scorers.length]; for (int i = 0; i < subs.length; ++i) { - subs[i] = scorers[i].explain(doc, freq); + subs[i] = scorers[i].explain(freq, norm); } - return Explanation.match(score(doc, freq.getValue()), "Sum of:", subs); + return Explanation.match(score(freq.getValue().floatValue(), norm), "Sum of:", subs); } }; } } - private static class Weight extends SimWeight { - private final String fieldName; - private final Query query; - private final Field field; - private final Term[] terms; - - Weight(String fieldName, Query query, Field field, Term[] terms) { - this.fieldName = fieldName; - this.query = query; - this.field = field; - this.terms = terms; - } - } - /** Scoring factors that come from the query. */ public static class Query { private final float boost; @@ -254,25 +209,16 @@ public long getTotalTermFreq() { /** Statistics that are specific to a document. */ public static class Doc { - private final NumericDocValues norms; - private int docID; private float freq; + private long norm; - private Doc(NumericDocValues norms) { - this.norms = norms; - } + private Doc() {} /** Return the number of tokens that the current document has in the considered field. */ - public int getLength() throws IOException { + public int getLength() { // the length is computed lazily so that similarities that do not use the length are // not penalized - if (norms == null) { - return 1; - } else if (norms.advanceExact(docID)) { - return SmallFloat.byte4ToInt((byte) norms.longValue()); - } else { - return 0; - } + return SmallFloat.byte4ToInt((byte) norm); } /** Return the number of occurrences of the term in the current document for the considered field. 
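The rewritten SimScorer contract passes the raw index-time norm into score(float freq, long norm) instead of a docID, which is why Doc#getLength() above can decode it directly rather than pulling NumericDocValues. A round-trip sketch of the SmallFloat encoding involved (the field length is an arbitrary example):

    import org.apache.lucene.util.SmallFloat;

    class NormEncodingSketch {
        public static void main(String[] args) {
            int fieldLength = 8;                               // token count, as in computeNorm()
            byte encoded = SmallFloat.intToByte4(fieldLength); // what the norm stores
            int decoded = SmallFloat.byte4ToInt(encoded);      // what Doc.getLength() recovers
            System.out.println(decoded);                       // 8; the encoding only loses precision for large lengths
        }
    }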
*/ diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 18c6d6a3fc063..9aab1260b6b48 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -24,13 +24,10 @@ import org.apache.lucene.search.similarities.AfterEffectL; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModel; -import org.apache.lucene.search.similarities.BasicModelBE; -import org.apache.lucene.search.similarities.BasicModelD; import org.apache.lucene.search.similarities.BasicModelG; import org.apache.lucene.search.similarities.BasicModelIF; import org.apache.lucene.search.similarities.BasicModelIn; import org.apache.lucene.search.similarities.BasicModelIne; -import org.apache.lucene.search.similarities.BasicModelP; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; @@ -74,24 +71,35 @@ private SimilarityProviders() {} // no instantiation static final String DISCOUNT_OVERLAPS = "discount_overlaps"; private static final Map<String, BasicModel> BASIC_MODELS; + private static final Map<String, String> LEGACY_BASIC_MODELS; private static final Map<String, AfterEffect> AFTER_EFFECTS; + private static final Map<String, String> LEGACY_AFTER_EFFECTS; static { Map<String, BasicModel> models = new HashMap<>(); - models.put("be", new BasicModelBE()); - models.put("d", new BasicModelD()); models.put("g", new BasicModelG()); models.put("if", new BasicModelIF()); models.put("in", new BasicModelIn()); models.put("ine", new BasicModelIne()); - models.put("p", new BasicModelP()); BASIC_MODELS = unmodifiableMap(models); + Map<String, String> legacyModels = new HashMap<>(); + // TODO: be and g are both based on the Bose-Einstein model. + // Is there a better replacement for d and p which use the binomial model?
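The mappings registered on the lines that follow route the removed DFR components to their closest surviving relatives. For orientation, this is how a DFRSimilarity is assembled from components that do survive in Lucene 8 (the normalization choice here is arbitrary):

    import org.apache.lucene.search.similarities.AfterEffectL;
    import org.apache.lucene.search.similarities.BasicModelG;
    import org.apache.lucene.search.similarities.DFRSimilarity;
    import org.apache.lucene.search.similarities.NormalizationH2;

    class DfrSketch {
        public static void main(String[] args) {
            // "be" now maps to "g": both derive from the Bose-Einstein model.
            DFRSimilarity dfr = new DFRSimilarity(new BasicModelG(), new AfterEffectL(), new NormalizationH2());
            System.out.println(dfr);
        }
    }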
+ legacyModels.put("be", "g"); + legacyModels.put("d", "ine"); + legacyModels.put("p", "ine"); + LEGACY_BASIC_MODELS = unmodifiableMap(legacyModels); + Map<String, AfterEffect> effects = new HashMap<>(); - effects.put("no", new AfterEffect.NoAfterEffect()); effects.put("b", new AfterEffectB()); effects.put("l", new AfterEffectL()); AFTER_EFFECTS = unmodifiableMap(effects); + + Map<String, String> legacyEffects = new HashMap<>(); + // l is simpler than b, so this should be a better replacement for "no" + legacyEffects.put("no", "l"); + LEGACY_AFTER_EFFECTS = unmodifiableMap(legacyEffects); } private static final Map<String, Independence> INDEPENDENCE_MEASURES; @@ -124,9 +132,25 @@ private SimilarityProviders() {} // no instantiation * @param settings Settings to parse * @return {@link BasicModel} referred to in the Settings */ - private static BasicModel parseBasicModel(Settings settings) { + private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings settings) { String basicModel = settings.get("basic_model"); BasicModel model = BASIC_MODELS.get(basicModel); + + if (model == null) { + String replacement = LEGACY_BASIC_MODELS.get(basicModel); + if (replacement != null) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Basic model [" + basicModel + "] isn't supported anymore, " + + "please use another model."); + } else { + DEPRECATION_LOGGER.deprecated("Basic model [" + basicModel + + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); + model = BASIC_MODELS.get(replacement); + assert model != null; + } + } + } + if (model == null) { throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "], expected one of " + BASIC_MODELS.keySet()); } @@ -139,9 +163,25 @@ private static BasicModel parseBasicModel(Settings settings) { * @param settings Settings to parse * @return {@link AfterEffect} referred to in the Settings */ - private static AfterEffect parseAfterEffect(Settings settings) { + private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Settings settings) { String afterEffect = settings.get("after_effect"); AfterEffect effect = AFTER_EFFECTS.get(afterEffect); + + if (effect == null) { + String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); + if (replacement != null) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("After effect [" + afterEffect + + "] isn't supported anymore, please use another effect."); + } else { + DEPRECATION_LOGGER.deprecated("After effect [" + afterEffect + + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); + effect = AFTER_EFFECTS.get(replacement); + assert effect != null; + } + } + } + if (effect == null) { throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "], expected one of " + AFTER_EFFECTS.keySet()); } @@ -263,8 +303,8 @@ public static DFRSimilarity createDfrSimilarity(Settings settings, Version index return new DFRSimilarity( - parseBasicModel(settings), - parseAfterEffect(settings), + parseBasicModel(indexCreatedVersion, settings), + parseAfterEffect(indexCreatedVersion, settings), parseNormalization(settings)); } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index 1ecdc797073cf..a22ada87d772c 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ 
b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -19,14 +19,17 @@ package org.elasticsearch.indices.analysis; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.standard.StandardFilter; +import org.apache.lucene.analysis.TokenStream; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.NamedRegistry; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; @@ -39,7 +42,6 @@ import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; import org.elasticsearch.index.analysis.SimpleAnalyzerProvider; import org.elasticsearch.index.analysis.StandardAnalyzerProvider; -import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; import org.elasticsearch.index.analysis.StopTokenFilterFactory; @@ -69,6 +71,8 @@ public final class AnalysisModule { private static final IndexSettings NA_INDEX_SETTINGS; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(AnalysisModule.class)); + private final HunspellService hunspellService; private final AnalysisRegistry analysisRegistry; @@ -116,7 +120,29 @@ private NamedRegistry<AnalysisProvider<TokenFilterFactory>> setupTokenFilters(Li hunspellService) { NamedRegistry<AnalysisProvider<TokenFilterFactory>> tokenFilters = new NamedRegistry<>("token_filter"); tokenFilters.register("stop", StopTokenFilterFactory::new); - tokenFilters.register("standard", StandardTokenFilterFactory::new); + // Add "standard" for old indices (bwc) + tokenFilters.register("standard", new AnalysisProvider<TokenFilterFactory>() { + @Override + public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + if (indexSettings.getIndexVersionCreated().before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + "The [standard] token filter name is deprecated and will be removed in a future version."); + } else { + throw new IllegalArgumentException("The [standard] token filter has been removed."); + } + return new AbstractTokenFilterFactory(indexSettings, name, settings) { + @Override + public TokenStream create(TokenStream tokenStream) { + return tokenStream; + } + }; + } + + @Override + public boolean requiresAnalysisSettings() { + return false; + } + }); tokenFilters.register("shingle", ShingleTokenFilterFactory::new); tokenFilters.register("hunspell", requiresAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory (indexSettings, name, settings, hunspellService))); @@ -153,7 +179,17 @@ static Map<String, PreConfiguredTokenFilter> setupPreConfiguredTokenFilters(List // Add filters available in lucene-core preConfiguredTokenFilters.register("lowercase", PreConfiguredTokenFilter.singleton("lowercase", true, LowerCaseFilter::new)); - preConfiguredTokenFilters.register("standard", PreConfiguredTokenFilter.singleton("standard", false, StandardFilter::new)); + // Add "standard" for old indices (bwc) + preConfiguredTokenFilters.register(
"standard", + PreConfiguredTokenFilter.singletonWithVersion("standard", true, (reader, version) -> { + if (version.before(Version.V_7_0_0_alpha1)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + "The [standard] token filter is deprecated and will be removed in a future version."); + } else { + throw new IllegalArgumentException("The [standard] token filter has been removed."); + } + return reader; + })); /* Note that "stop" is available in lucene-core but it's pre-built * version uses a set of English stop words that are in * lucene-analyzers-common so "stop" is defined in the analysis-common diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 0f31a8a46f1db..1b4772b3e51ef 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.core.SimpleAnalyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.standard.ClassicAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.Version; @@ -61,7 +62,7 @@ protected Analyzer create(Version version) { STOP { @Override protected Analyzer create(Version version) { - Analyzer a = new StopAnalyzer(); + Analyzer a = new StopAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); a.setVersion(version.luceneVersion); return a; } diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 585713b641f5e..6b9432483f304 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -19,7 +19,7 @@ package org.elasticsearch.rest; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.CheckedConsumer; @@ -110,7 +110,7 @@ protected final String unrecognized( invalids.size() > 1 ? "s" : "")); boolean first = true; for (final String invalid : invalids) { - final LevensteinDistance ld = new LevensteinDistance(); + final LevenshteinDistance ld = new LevenshteinDistance(); final List> scoredParams = new ArrayList<>(); for (final String candidate : candidates) { final float distance = ld.getDistance(invalid, candidate); diff --git a/server/src/main/java/org/elasticsearch/script/SimilarityScript.java b/server/src/main/java/org/elasticsearch/script/SimilarityScript.java index c410a0bd6eba4..4aeb4063959b3 100644 --- a/server/src/main/java/org/elasticsearch/script/SimilarityScript.java +++ b/server/src/main/java/org/elasticsearch/script/SimilarityScript.java @@ -21,8 +21,6 @@ import org.elasticsearch.index.similarity.ScriptedSimilarity; -import java.io.IOException; - /** A script that is used to build {@link ScriptedSimilarity} instances. 
*/ public abstract class SimilarityScript { @@ -34,7 +32,7 @@ public abstract class SimilarityScript { * @param doc per-document statistics */ public abstract double execute(double weight, ScriptedSimilarity.Query query, - ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, ScriptedSimilarity.Doc doc) throws IOException; + ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, ScriptedSimilarity.Doc doc); public interface Factory { SimilarityScript newInstance(); diff --git a/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java b/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java index f48a9c93e023b..04bbc3cccf40a 100644 --- a/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java +++ b/server/src/main/java/org/elasticsearch/script/SimilarityWeightScript.java @@ -21,8 +21,6 @@ import org.elasticsearch.index.similarity.ScriptedSimilarity; -import java.io.IOException; - /** A script that is used to compute scoring factors that are the same for all documents. */ public abstract class SimilarityWeightScript { @@ -32,7 +30,7 @@ public abstract class SimilarityWeightScript { * @param term term-level statistics */ public abstract double execute(ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, - ScriptedSimilarity.Term term) throws IOException; + ScriptedSimilarity.Term term); public interface Factory { SimilarityWeightScript newInstance(); diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 28a600c0d21ef..71ea55e97a762 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -82,7 +82,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<DocumentField> { diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java private void registerFetchSubPhases(List<SearchPlugin> plugins) { registerFetchSubPhase(new VersionFetchSubPhase()); registerFetchSubPhase(new MatchedQueriesFetchSubPhase()); registerFetchSubPhase(new HighlightPhase(settings, highlighters)); + registerFetchSubPhase(new ScoreFetchSubPhase()); FetchPhaseConstructionContext context = new FetchPhaseConstructionContext(highlighters); registerFromPlugin(plugins, p -> p.getFetchSubPhases(context), this::registerFetchSubPhase); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index a7db2c55fe149..5cb9f81626c94 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -895,13 +895,13 @@ private void shortcutDocIdsToLoad(SearchContext context) { completionSuggestions = Collections.emptyList(); } if (context.request().scroll() != null) { - TopDocs topDocs = context.queryResult().topDocs(); + TopDocs topDocs = context.queryResult().topDocs().topDocs; docIdsToLoad = new int[topDocs.scoreDocs.length + numSuggestDocs]; for (int i = 0; i < topDocs.scoreDocs.length; i++) { docIdsToLoad[docsOffset++] = topDocs.scoreDocs[i].doc; } } else { - TopDocs topDocs = context.queryResult().topDocs(); + TopDocs topDocs = context.queryResult().topDocs().topDocs; if (topDocs.scoreDocs.length < context.from()) { // no more docs...
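These SearchService call sites change because Lucene 8's TopDocs carries a TotalHits object (a count plus a relation saying whether the count is exact or a lower bound) instead of a plain long, and no longer carries a max score, which is what the TopDocsAndMaxScore wrapper compensates for. A minimal illustration (values made up):

    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;

    class TotalHitsSketch {
        public static void main(String[] args) {
            TopDocs topDocs = new TopDocs(new TotalHits(42, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]);
            System.out.println(topDocs.totalHits.value + " " + topDocs.totalHits.relation); // 42 EQUAL_TO
        }
    }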
docIdsToLoad = new int[numSuggestDocs]; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 2ad76d8a2b49c..568a692ba61c0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -98,9 +99,9 @@ public void postCollection() throws IOException { badState(); } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { badState(); - return false; // unreachable + return ScoreMode.COMPLETE; // unreachable } }; addRequestCircuitBreakerBytes(DEFAULT_WEIGHT); @@ -137,13 +138,13 @@ protected long addRequestCircuitBreakerBytes(long bytes) { * your aggregator needs them. */ @Override - public boolean needsScores() { + public ScoreMode scoreMode() { for (Aggregator agg : subAggregators) { - if (agg.needsScores()) { - return true; + if (agg.scoreMode().needsScores()) { + return ScoreMode.COMPLETE; } } - return false; + return ScoreMode.COMPLETE_NO_SCORES; } public Map<String, Object> metaData() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 88cc7319948bd..59b63520a1bd3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -74,8 +75,8 @@ public Aggregator parent() { } @Override - public boolean needsScores() { - return first.needsScores(); + public ScoreMode scoreMode() { + return first.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java index f2c8bf5e16e44..c50dd615c7b34 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; +import org.apache.lucene.search.ScoreMode; import java.io.IOException; @@ -45,8 +46,8 @@ public void postCollection() throws IOException { // no-op } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java index a8a015ab5453b..624c8d5409a56 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -21,9 +21,11 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.ScoreCachingWrappingScorer; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import java.io.IOException; @@ -96,8 +98,8 @@ public static BucketCollector wrap(Iterable collector private MultiBucketCollector(BucketCollector... collectors) { this.collectors = collectors; int numNeedsScores = 0; - for (BucketCollector collector : collectors) { - if (collector.needsScores()) { + for (Collector collector : collectors) { + if (collector.scoreMode().needsScores()) { numNeedsScores += 1; } } @@ -105,27 +107,30 @@ private MultiBucketCollector(BucketCollector... collectors) { } @Override - public void preCollection() throws IOException { - for (BucketCollector collector : collectors) { - collector.preCollection(); + public ScoreMode scoreMode() { + ScoreMode scoreMode = null; + for (Collector collector : collectors) { + if (scoreMode == null) { + scoreMode = collector.scoreMode(); + } else if (scoreMode != collector.scoreMode()) { + return ScoreMode.COMPLETE; + } } + return scoreMode; } @Override - public void postCollection() throws IOException { + public void preCollection() throws IOException { for (BucketCollector collector : collectors) { - collector.postCollection(); + collector.preCollection(); } } @Override - public boolean needsScores() { + public void postCollection() throws IOException { for (BucketCollector collector : collectors) { - if (collector.needsScores()) { - return true; - } + collector.postCollection(); } - return false; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index 6ebf9e3c41c40..32695ac69a88e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.packed.PackedInts; @@ -81,11 +82,11 @@ public BestBucketsDeferringCollector(SearchContext context, boolean isGlobal) { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { if (collector == null) { throw new IllegalStateException(); } - return collector.needsScores(); + return collector.scoreMode(); } /** Set the deferred collectors. */ @@ -153,11 +154,11 @@ public void prepareSelectedBuckets(long... selectedBuckets) throws IOException { } this.selectedBuckets = hash; - boolean needsScores = needsScores(); + boolean needsScores = scoreMode().needsScores(); Weight weight = null; if (needsScores) { Query query = isGlobal ? 
new MatchAllDocsQuery() : searchContext.query(); - weight = searchContext.searcher().createNormalizedWeight(query, true); + weight = searchContext.searcher().createWeight(searchContext.searcher().rewrite(query), ScoreMode.COMPLETE, 1f); } for (Entry entry : entries) { final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index 3c63df2c06a76..7151a6f33d9fe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -62,8 +63,8 @@ protected class WrappedAggregator extends Aggregator { } @Override - public boolean needsScores() { - return in.needsScores(); + public ScoreMode scoreMode() { + return in.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 5653bc58f2a6c..53049d0301c2d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.packed.PackedInts; @@ -66,11 +67,11 @@ public void setDeferredCollector(Iterable deferredCollectors) { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { if (collector == null) { throw new IllegalStateException(); } - return collector.needsScores(); + return collector.scoreMode(); } @Override @@ -158,10 +159,12 @@ public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { } this.selectedBuckets = hash; - boolean needsScores = collector.needsScores(); + boolean needsScores = collector.scoreMode().needsScores(); Weight weight = null; if (needsScores) { - weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true); + weight = searchContext.searcher().createWeight( + searchContext.searcher().rewrite(searchContext.query()), + ScoreMode.COMPLETE, 1f); } for (Entry entry : entries) { final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index 6df88379d4eb0..69bc2de39dca9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -51,7 +52,7 @@ public AdjacencyMatrixAggregatorFactory(String name, List filters, KeyedFilter keyedFilter = filters.get(i); this.keys[i] = keyedFilter.key(); Query filter = keyedFilter.filter().toFilter(context.getQueryShardContext()); - this.weights[i] = contextSearcher.createNormalizedWeight(filter, false); + this.weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 97c535f56c694..3c43cf3ec1d2c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.RoaringDocIdSet; @@ -204,11 +205,11 @@ public void collect(int doc, long bucket) throws IOException { * the {@link #deferredCollectors}. 
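The deferring-collector hunks above all perform the same migration: Lucene 8 removed IndexSearcher#createNormalizedWeight(Query, boolean), so the caller now rewrites the query explicitly and states how scores will be consumed via a ScoreMode, plus an explicit boost. A minimal sketch of the equivalence, under the assumption that a neutral boost of 1f matches the old normalized behavior (the helper class and method names are illustrative):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Weight;

// Sketch of the recurring createNormalizedWeight -> createWeight change.
final class WeightMigrationSketch {
    static Weight weightFor(IndexSearcher searcher, Query query, boolean needsScores) throws IOException {
        // Lucene 7 equivalent: searcher.createNormalizedWeight(query, needsScores)
        ScoreMode mode = needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
        return searcher.createWeight(searcher.rewrite(query), mode, 1f); // 1f = neutral boost
    }
}

The explicit rewrite step is the notable behavioral detail: createWeight expects an already-rewritten query, which createNormalizedWeight used to do internally.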
*/ private void runDeferredCollections() throws IOException { - final boolean needsScores = needsScores(); + final boolean needsScores = scoreMode().needsScores(); Weight weight = null; if (needsScores) { Query query = context.query(); - weight = context.searcher().createNormalizedWeight(query, true); + weight = context.searcher().createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE, 1f); } deferredCollectors.preCollection(); for (Entry entry : entries) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java index d0f2d6ef9461a..9bf51e57df06d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Query; import org.apache.lucene.util.DocIdSetBuilder; -import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.FutureArrays; import java.io.IOException; import java.util.function.ToLongFunction; @@ -147,8 +147,10 @@ public void visit(int docID, byte[] packedValue) throws IOException { @Override public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) { - if ((upperPointQuery != null && StringHelper.compare(bytesPerDim, minPackedValue, 0, upperPointQuery, 0) > 0) || - (lowerPointQuery != null && StringHelper.compare(bytesPerDim, maxPackedValue, 0, lowerPointQuery, 0) < 0)) { + if ((upperPointQuery != null && + FutureArrays.compareUnsigned(minPackedValue, 0, bytesPerDim, upperPointQuery, 0, bytesPerDim) > 0) || + (lowerPointQuery != null && + FutureArrays.compareUnsigned(maxPackedValue, 0, bytesPerDim, lowerPointQuery, 0, bytesPerDim) < 0)) { // does not match the query return PointValues.Relation.CELL_OUTSIDE_QUERY; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 4b54dccbf96c1..c8b1e630b8549 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; @@ -58,7 +59,7 @@ public Weight getWeight() { if (weight == null) { IndexSearcher contextSearcher = context.searcher(); try { - weight = contextSearcher.createNormalizedWeight(filter, false); + weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { throw new AggregationInitializationException("Failed to initialse filter", e); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 048042f05ff65..81a78632d4bd6 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.Aggregator; @@ -74,7 +75,7 @@ public Weight[] getWeights() { IndexSearcher contextSearcher = context.searcher(); weights = new Weight[filters.length]; for (int i = 0; i < filters.length; ++i) { - this.weights[i] = contextSearcher.createNormalizedWeight(filters[i], false); + this.weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filters[i]), ScoreMode.COMPLETE_NO_SCORES, 1); } } catch (IOException e) { throw new AggregationInitializationException("Failed to initialse filters for aggregation [" + name() + "]", e); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index ec54abb334056..700145b94fa56 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.aggregations.Aggregator; @@ -57,8 +58,11 @@ public class GeoHashGridAggregator extends BucketsAggregator { } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index f86145386f1df..b15804f8c6e86 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; @@ -78,8 +79,11 @@ class AutoDateHistogramAggregator extends DeferableBucketAggregator { } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8b1f0c4642160..735a6717210a5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; @@ -86,8 +87,11 @@ class DateHistogramAggregator extends BucketsAggregator { } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index a0e4871a7df42..e72b609494b75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; @@ -87,8 +88,11 @@ class HistogramAggregator extends BucketsAggregator { } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java index a85225e846372..ef9c1969c413b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregator.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; @@ -75,7 +76,7 @@ public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final L IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx); IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); - Weight weight = searcher.createNormalizedWeight(childFilter, false); + Weight weight = searcher.createWeight(searcher.rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer childDocsScorer = weight.scorer(ctx); final BitSet parentDocs = parentFilter.getBitSet(ctx); diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index 14c1cc8818704..b8b0cf293a371 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; @@ -91,8 +92,11 @@ public BinaryRangeAggregator(String name, AggregatorFactories factories, } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index c490b344bdbce..9050f1e49f1ad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -246,8 +247,11 @@ public RangeAggregator(String name, AggregatorFactories factories, ValuesSource. } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index bb89173e76791..4e63d693d1875 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; @@ -70,8 +71,8 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme } @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } /** Set the deferred collectors. 
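The aggregator hunks above repeat one translation many times: Collector#needsScores() (a boolean) became Collector#scoreMode() (an enum), and ScoreMode.needsScores() recovers the old boolean at call sites. A minimal sketch of the boolean-to-enum mapping these overrides implement; the two abstract methods below are illustrative stand-ins for valuesSource.needsScores() and super.scoreMode():

import org.apache.lucene.search.ScoreMode;

// Sketch of the needsScores() -> scoreMode() override pattern.
abstract class ScoreModeMigrationSketch {
    abstract boolean valuesSourceNeedsScores(); // stand-in for valuesSource.needsScores()
    abstract ScoreMode superScoreMode();        // stand-in for super.scoreMode()

    // Lucene 7 shape:
    //   boolean needsScores() { return valuesSourceNeedsScores() || super.needsScores(); }
    ScoreMode scoreMode() {
        if (valuesSourceNeedsScores()) {
            return ScoreMode.COMPLETE;   // full evaluation, scores required
        }
        return superScoreMode();         // otherwise defer to sub-aggregators
    }
}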
*/ @@ -102,7 +103,7 @@ public void collect(int doc, long bucket) throws IOException { // Designed to be overridden by subclasses that may score docs by criteria // other than Lucene score protected TopDocsCollector createTopDocsCollector(int size) throws IOException { - return TopScoreDocCollector.create(size); + return TopScoreDocCollector.create(size, Integer.MAX_VALUE); } @Override @@ -280,6 +281,11 @@ public void collect(int docId, long parentBucket) throws IOException { sampler.collect(docId); maxDocId = Math.max(maxDocId, docId); } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } } public int getDocCount(long parentBucket) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 59e491705c69e..d4995f75616a7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -149,8 +150,8 @@ public String toString() { } @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java index 7cd2c4e9b3a85..90aa633ffc5f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; @@ -61,8 +62,11 @@ public LongTermsAggregator(String name, AggregatorFactories factories, ValuesSou } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } protected SortedNumericDocValues getValues(ValuesSource.Numeric valuesSource, LeafReaderContext ctx) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java index 95bc83ad88fd6..5bd8a8cd1d09d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.apache.lucene.index.LeafReaderContext; +import 
org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lease.Releasables; @@ -64,8 +65,11 @@ public StringTermsAggregator(String name, AggregatorFactories factories, ValuesS } @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + public ScoreMode scoreMode() { + if (valuesSource != null && valuesSource.needsScores()) { + return ScoreMode.COMPLETE; + } + return super.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java index f3b867307d172..c1bdc85fb02e7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -225,7 +225,7 @@ static boolean descendsFromNestedAggregator(Aggregator parent) { private boolean subAggsNeedScore() { for (Aggregator subAgg : subAggregators) { - if (subAgg.needsScores()) { + if (subAgg.scoreMode().needsScores()) { return true; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java index 27890efbff182..042618011f16d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/AvgAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.avg; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -61,8 +62,8 @@ public AvgAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFor } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java index 7a8483b1b26ee..0df6b69681937 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.RamUsageEstimator; @@ -71,8 +72,8 @@ public CardinalityAggregator(String name, ValuesSource valuesSource, int precisi } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } private Collector pickCollector(LeafReaderContext ctx) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index ff76e6637baf4..bd73470ff407d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.max; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -60,8 +61,8 @@ public MaxAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFor } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index e4b371514bdf9..0f5dd36cb4930 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.min; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -59,8 +60,8 @@ public MinAggregator(String name, ValuesSource.Numeric valuesSource, DocValueFor } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractHDRPercentilesAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractHDRPercentilesAggregator.java index 47c267aae903e..56cd7eefbf203 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractHDRPercentilesAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractHDRPercentilesAggregator.java @@ -21,6 +21,7 @@ import org.HdrHistogram.DoubleHistogram; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.BigArrays; @@ -65,8 +66,8 @@ public AbstractHDRPercentilesAggregator(String name, ValuesSource.Numeric values } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java index 1b5ed510f8d61..802e1b0257cea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractTDigestPercentilesAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.BigArrays; @@ -64,8 +65,8 @@ public AbstractTDigestPercentilesAggregator(String name, ValuesSource.Numeric va } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index ea7bf270b8b62..8a49530f0d3da 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.metrics.scripted; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.ScriptedMetricAggContexts; @@ -55,8 +56,8 @@ protected ScriptedMetricAggregator(String name, ScriptedMetricAggContexts.MapScr } @Override - public boolean needsScores() { - return true; // TODO: how can we know if the script relies on scores? + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; // TODO: how can we know if the script relies on scores? 
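The BestDocsDeferringCollector hunk above also shows the new second argument to the top-docs collector factories: Lucene 8 collectors take a totalHitsThreshold that lets them stop counting hits early. Passing Integer.MAX_VALUE keeps exact hit counts and so preserves the pre-8.0 behavior. A minimal sketch (the class and method names are illustrative):

import org.apache.lucene.search.TopScoreDocCollector;

// Sketch: Integer.MAX_VALUE as totalHitsThreshold means "never stop
// counting", i.e. exact totalHits, matching the Lucene 7 single-arg
// TopScoreDocCollector.create(int).
final class ExactCountCollectorSketch {
    static TopScoreDocCollector topTen() {
        return TopScoreDocCollector.create(10, Integer.MAX_VALUE);
    }
}

Returning Float.MAX_VALUE from the new Scorer#getMaxScore(int) override, as the hunk does, is the conservative answer: it claims no upper bound on scores, so no hits can be skipped.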
} @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java index 321e9e10f0fe8..42d14d05fecb4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.stats; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -70,8 +71,8 @@ public StatsAggregator(String name, ValuesSource.Numeric valuesSource, DocValueF } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java index 8339c06aefdcc..1089d2e1b9796 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.stats.extended; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; @@ -78,8 +79,8 @@ public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, D } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java index 9ed8103a1e1ee..56122c6f3dac4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.sum; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -57,8 +58,8 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue { } @Override - public boolean needsScores() { - return valuesSource != null && valuesSource.needsScores(); + public ScoreMode scoreMode() { + return valuesSource != null && valuesSource.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java index 58fac4b952048..8b6fa373212b5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java @@ -23,9 +23,11 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits.Relation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -43,10 +45,10 @@ public class InternalTopHits extends InternalAggregation implements TopHits { private int from; private int size; - private TopDocs topDocs; + private TopDocsAndMaxScore topDocs; private SearchHits searchHits; - public InternalTopHits(String name, int from, int size, TopDocs topDocs, SearchHits searchHits, + public InternalTopHits(String name, int from, int size, TopDocsAndMaxScore topDocs, SearchHits searchHits, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.from = from; @@ -85,7 +87,7 @@ public SearchHits getHits() { return searchHits; } - TopDocs getTopDocs() { + TopDocsAndMaxScore getTopDocs() { return topDocs; } @@ -115,12 +117,12 @@ public InternalAggregation doReduce(List aggregations, Redu final TopDocs reducedTopDocs; final TopDocs[] shardDocs; - if (topDocs instanceof TopFieldDocs) { - Sort sort = new Sort(((TopFieldDocs) topDocs).fields); + if (topDocs.topDocs instanceof TopFieldDocs) { + Sort sort = new Sort(((TopFieldDocs) topDocs.topDocs).fields); shardDocs = new TopFieldDocs[aggregations.size()]; for (int i = 0; i < shardDocs.length; i++) { InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs; + shardDocs[i] = topHitsAgg.topDocs.topDocs; shardHits[i] = topHitsAgg.searchHits; } reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs, true); @@ -128,12 +130,24 @@ public InternalAggregation doReduce(List aggregations, Redu shardDocs = new TopDocs[aggregations.size()]; for (int i = 0; i < shardDocs.length; i++) { InternalTopHits topHitsAgg = (InternalTopHits) aggregations.get(i); - shardDocs[i] = topHitsAgg.topDocs; + shardDocs[i] = topHitsAgg.topDocs.topDocs; shardHits[i] = topHitsAgg.searchHits; } reducedTopDocs = TopDocs.merge(from, size, shardDocs, true); } + float maxScore = Float.NaN; + for (InternalAggregation agg : aggregations) { + InternalTopHits topHitsAgg = (InternalTopHits) agg; + if (Float.isNaN(topHitsAgg.topDocs.maxScore) == false) { + if (Float.isNaN(maxScore)) { + maxScore = topHitsAgg.topDocs.maxScore; + } else { + maxScore = Math.max(maxScore, topHitsAgg.topDocs.maxScore); + } + } + } + final int[] tracker = new int[shardHits.length]; SearchHit[] hits = new SearchHit[reducedTopDocs.scoreDocs.length]; for (int i = 0; i < reducedTopDocs.scoreDocs.length; i++) { @@ -144,9 +158,10 @@ public InternalAggregation doReduce(List aggregations, Redu } while 
(shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc); hits[i] = shardHits[scoreDoc.shardIndex].getAt(position); } - return new InternalTopHits(name, this.from, this.size, reducedTopDocs, new SearchHits(hits, reducedTopDocs.totalHits, - reducedTopDocs.getMaxScore()), - pipelineAggregators(), getMetaData()); + assert reducedTopDocs.totalHits.relation == Relation.EQUAL_TO; + return new InternalTopHits(name, this.from, this.size, + new TopDocsAndMaxScore(reducedTopDocs, maxScore), + new SearchHits(hits, reducedTopDocs.totalHits.value, maxScore), pipelineAggregators(), getMetaData()); } @Override @@ -170,11 +185,12 @@ protected boolean doEquals(Object obj) { InternalTopHits other = (InternalTopHits) obj; if (from != other.from) return false; if (size != other.size) return false; - if (topDocs.totalHits != other.topDocs.totalHits) return false; - if (topDocs.scoreDocs.length != other.topDocs.scoreDocs.length) return false; - for (int d = 0; d < topDocs.scoreDocs.length; d++) { - ScoreDoc thisDoc = topDocs.scoreDocs[d]; - ScoreDoc otherDoc = other.topDocs.scoreDocs[d]; + if (topDocs.topDocs.totalHits.value != other.topDocs.topDocs.totalHits.value) return false; + if (topDocs.topDocs.totalHits.relation != other.topDocs.topDocs.totalHits.relation) return false; + if (topDocs.topDocs.scoreDocs.length != other.topDocs.topDocs.scoreDocs.length) return false; + for (int d = 0; d < topDocs.topDocs.scoreDocs.length; d++) { + ScoreDoc thisDoc = topDocs.topDocs.scoreDocs[d]; + ScoreDoc otherDoc = other.topDocs.topDocs.scoreDocs[d]; if (thisDoc.doc != otherDoc.doc) return false; if (Double.compare(thisDoc.score, otherDoc.score) != 0) return false; if (thisDoc.shardIndex != otherDoc.shardIndex) return false; @@ -195,9 +211,10 @@ protected boolean doEquals(Object obj) { protected int doHashCode() { int hashCode = from; hashCode = 31 * hashCode + size; - hashCode = 31 * hashCode + Long.hashCode(topDocs.totalHits); - for (int d = 0; d < topDocs.scoreDocs.length; d++) { - ScoreDoc doc = topDocs.scoreDocs[d]; + hashCode = 31 * hashCode + Long.hashCode(topDocs.topDocs.totalHits.value); + hashCode = 31 * hashCode + topDocs.topDocs.totalHits.relation.hashCode(); + for (int d = 0; d < topDocs.topDocs.scoreDocs.length; d++) { + ScoreDoc doc = topDocs.topDocs.scoreDocs[d]; hashCode = 31 * hashCode + doc.doc; hashCode = 31 * hashCode + Float.floatToIntBits(doc.score); hashCode = 31 * hashCode + doc.shardIndex; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java index e59299754aead..48a42b74292c2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java @@ -23,18 +23,24 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; import 
org.apache.lucene.search.TopScoreDocCollector; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -57,9 +63,21 @@ public class TopHitsAggregator extends MetricsAggregator { + private static class Collectors { + public final TopDocsCollector topDocsCollector; + public final MaxScoreCollector maxScoreCollector; + public final Collector collector; + + Collectors(TopDocsCollector topDocsCollector, MaxScoreCollector maxScoreCollector) { + this.topDocsCollector = topDocsCollector; + this.maxScoreCollector = maxScoreCollector; + collector = MultiCollector.wrap(topDocsCollector, maxScoreCollector); + } + } + private final FetchPhase fetchPhase; private final SubSearchContext subSearchContext; - private final LongObjectPagedHashMap> topDocsCollectors; + private final LongObjectPagedHashMap topDocsCollectors; TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, SearchContext context, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { @@ -70,13 +88,13 @@ public class TopHitsAggregator extends MetricsAggregator { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { SortAndFormats sort = subSearchContext.sort(); if (sort != null) { - return sort.sort.needsScores() || subSearchContext.trackScores(); + return sort.sort.needsScores() || subSearchContext.trackScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } else { // sort by score - return true; + return ScoreMode.COMPLETE; } } @@ -102,8 +120,8 @@ public void setScorer(Scorer scorer) throws IOException { @Override public void collect(int docId, long bucket) throws IOException { - TopDocsCollector topDocsCollector = topDocsCollectors.get(bucket); - if (topDocsCollector == null) { + Collectors collectors = topDocsCollectors.get(bucket); + if (collectors == null) { SortAndFormats sort = subSearchContext.sort(); int topN = subSearchContext.from() + subSearchContext.size(); if (sort == null) { @@ -115,20 +133,21 @@ public void collect(int docId, long bucket) throws IOException { // but here we create collectors ourselves and we need prevent OOM because of crazy an offset and size. topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc()); if (sort == null) { - topDocsCollector = TopScoreDocCollector.create(topN); + collectors = new Collectors(TopScoreDocCollector.create(topN, Integer.MAX_VALUE), null); } else { // TODO: can we pass trackTotalHits=subSearchContext.trackTotalHits(){ // Note that this would require to catch CollectionTerminatedException - topDocsCollector = TopFieldCollector.create(sort.sort, topN, true, subSearchContext.trackScores(), - subSearchContext.trackScores(), true); + collectors = new Collectors( + TopFieldCollector.create(sort.sort, topN, Integer.MAX_VALUE), + subSearchContext.trackScores() ? 
new MaxScoreCollector() : null); } - topDocsCollectors.put(bucket, topDocsCollector); + topDocsCollectors.put(bucket, collectors); } final LeafCollector leafCollector; final int key = leafCollectors.indexOf(bucket); if (key < 0) { - leafCollector = topDocsCollector.getLeafCollector(ctx); + leafCollector = collectors.collector.getLeafCollector(ctx); if (scorer != null) { leafCollector.setScorer(scorer); } @@ -142,58 +161,65 @@ public void collect(int docId, long bucket) throws IOException { } @Override - public InternalAggregation buildAggregation(long owningBucketOrdinal) { - TopDocsCollector topDocsCollector = topDocsCollectors.get(owningBucketOrdinal); - final InternalTopHits topHits; - if (topDocsCollector == null) { - topHits = buildEmptyAggregation(); - } else { - TopDocs topDocs = topDocsCollector.topDocs(); - if (subSearchContext.sort() == null) { - for (RescoreContext ctx : context().rescore()) { - try { - topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); - } catch (IOException e) { - throw new ElasticsearchException("Rescore TopHits Failed", e); - } + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + Collectors collectors = topDocsCollectors.get(owningBucketOrdinal); + if (collectors == null) { + return buildEmptyAggregation(); + } + TopDocsCollector topDocsCollector = collectors.topDocsCollector; + TopDocs topDocs = topDocsCollector.topDocs(); + float maxScore = Float.NaN; + if (subSearchContext.sort() == null) { + for (RescoreContext ctx : context().rescore()) { + try { + topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); + } catch (IOException e) { + throw new ElasticsearchException("Rescore TopHits Failed", e); } } - subSearchContext.queryResult().topDocs(topDocs, - subSearchContext.sort() == null ? null : subSearchContext.sort().formats); - int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; - for (int i = 0; i < topDocs.scoreDocs.length; i++) { - docIdsToLoad[i] = topDocs.scoreDocs[i].doc; + if (topDocs.scoreDocs.length > 0) { + maxScore = topDocs.scoreDocs[0].score; } - subSearchContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); - fetchPhase.execute(subSearchContext); - FetchSearchResult fetchResult = subSearchContext.fetchResult(); - SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); - for (int i = 0; i < internalHits.length; i++) { - ScoreDoc scoreDoc = topDocs.scoreDocs[i]; - SearchHit searchHitFields = internalHits[i]; - searchHitFields.shard(subSearchContext.shardTarget()); - searchHitFields.score(scoreDoc.score); - if (scoreDoc instanceof FieldDoc) { - FieldDoc fieldDoc = (FieldDoc) scoreDoc; - searchHitFields.sortValues(fieldDoc.fields, subSearchContext.sort().formats); - } + } else if (subSearchContext.trackScores()) { + TopFieldCollector.populateScores(topDocs.scoreDocs, subSearchContext.searcher(), subSearchContext.query()); + maxScore = collectors.maxScoreCollector.getMaxScore(); + } + final TopDocsAndMaxScore topDocsAndMaxScore = new TopDocsAndMaxScore(topDocs, maxScore); + subSearchContext.queryResult().topDocs(topDocsAndMaxScore, + subSearchContext.sort() == null ? 
null : subSearchContext.sort().formats); + int[] docIdsToLoad = new int[topDocs.scoreDocs.length]; + for (int i = 0; i < topDocs.scoreDocs.length; i++) { + docIdsToLoad[i] = topDocs.scoreDocs[i].doc; + } + subSearchContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); + fetchPhase.execute(subSearchContext); + FetchSearchResult fetchResult = subSearchContext.fetchResult(); + SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); + for (int i = 0; i < internalHits.length; i++) { + ScoreDoc scoreDoc = topDocs.scoreDocs[i]; + SearchHit searchHitFields = internalHits[i]; + searchHitFields.shard(subSearchContext.shardTarget()); + searchHitFields.score(scoreDoc.score); + if (scoreDoc instanceof FieldDoc) { + FieldDoc fieldDoc = (FieldDoc) scoreDoc; + searchHitFields.sortValues(fieldDoc.fields, subSearchContext.sort().formats); } - topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits(), - pipelineAggregators(), metaData()); } - return topHits; + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocsAndMaxScore, fetchResult.hits(), + pipelineAggregators(), metaData()); } @Override public InternalTopHits buildEmptyAggregation() { TopDocs topDocs; if (subSearchContext.sort() != null) { - topDocs = new TopFieldDocs(0, new FieldDoc[0], subSearchContext.sort().sort.getSort(), Float.NaN); + topDocs = new TopFieldDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new FieldDoc[0], + subSearchContext.sort().sort.getSort()); } else { topDocs = Lucene.EMPTY_TOP_DOCS; } - return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, SearchHits.empty(), - pipelineAggregators(), metaData()); + return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), new TopDocsAndMaxScore(topDocs, Float.NaN), + SearchHits.empty(), pipelineAggregators(), metaData()); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java index 7a34fe6df4a68..0d9c2b1bc3b83 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/weighted_avg/WeightedAvgAggregator.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics.weighted_avg; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; @@ -67,8 +68,8 @@ public WeightedAvgAggregator(String name, MultiValuesSource.NumericMultiValuesSo } @Override - public boolean needsScores() { - return valuesSources != null && valuesSources.needsScores(); + public ScoreMode scoreMode() { + return valuesSources != null && valuesSources.needsScores() ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java index 82a7657f18079..4d8a1ba63ba15 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseContext.java @@ -60,11 +60,11 @@ public List getInnerHit() { return innerHits; } - public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN, boolean trackMaxScore) { + public CollapsingTopDocsCollector createTopDocs(Sort sort, int topN) { if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) { - return CollapsingTopDocsCollector.createKeyword(fieldType.name(), sort, topN, trackMaxScore); + return CollapsingTopDocsCollector.createKeyword(fieldType.name(), sort, topN); } else if (fieldType instanceof NumberFieldMapper.NumberFieldType) { - return CollapsingTopDocsCollector.createNumeric(fieldType.name(), sort, topN, trackMaxScore); + return CollapsingTopDocsCollector.createNumeric(fieldType.name(), sort, topN); } else { throw new IllegalStateException("unknown type for collapse field " + fieldType.name() + ", only keywords and numbers are accepted"); diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index fa7e611348d78..0b7d8da481c62 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -25,8 +25,9 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermStatistics; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.search.SearchPhase; @@ -53,7 +54,8 @@ public void preProcess(SearchContext context) { public void execute(SearchContext context) { final ObjectHashSet termsSet = new ObjectHashSet<>(); try { - context.searcher().createNormalizedWeight(context.query(), true).extractTerms(new DelegateSet(termsSet)); + context.searcher().createWeight(context.searcher().rewrite(context.query()), ScoreMode.COMPLETE, 1f) + .extractTerms(new DelegateSet(termsSet)); for (RescoreContext rescoreContext : context.rescore()) { try { rescoreContext.rescorer().extractTerms(context.searcher(), rescoreContext, new DelegateSet(termsSet)); @@ -69,17 +71,19 @@ public void execute(SearchContext context) { if(context.isCancelled()) { throw new TaskCancelledException("cancelled"); } - // LUCENE 4 UPGRADE: cache TermContext? - TermContext termContext = TermContext.build(indexReaderContext, terms[i]); + // LUCENE 4 UPGRADE: cache TermStates? 
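The DfsPhase hunk above covers the TermContext -> TermStates rename: TermStates.build gains a needsStats flag, and the result feeds searcher.termStatistics as before. A minimal sketch, assuming the caller will read docFreq/totalTermFreq as the dfs phase does (the helper class is illustrative):

import java.io.IOException;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermStates;

// Sketch of the TermContext -> TermStates migration.
final class TermStatesSketch {
    static TermStates statsFor(IndexReaderContext topLevel, Term term) throws IOException {
        return TermStates.build(topLevel, term, true); // true: gather term statistics
    }
}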
+ TermStates termContext = TermStates.build(indexReaderContext, terms[i], true); termStatistics[i] = context.searcher().termStatistics(terms[i], termContext); } ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); for (Term term : terms) { assert term.field() != null : "field is null"; - if (!fieldStatistics.containsKey(term.field())) { + if (fieldStatistics.containsKey(term.field()) == false) { final CollectionStatistics collectionStatistics = context.searcher().collectionStatistics(term.field()); - fieldStatistics.put(term.field(), collectionStatistics); + if (collectionStatistics != null) { + fieldStatistics.put(term.field(), collectionStatistics); + } if(context.isCancelled()) { throw new TaskCancelledException("cancelled"); } diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 0cd624b00a36b..8de89089c4f01 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -124,9 +125,16 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0; out.writeVLong(statistics.maxDoc()); - out.writeVLong(addOne(statistics.docCount())); - out.writeVLong(addOne(statistics.sumTotalTermFreq())); - out.writeVLong(addOne(statistics.sumDocFreq())); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + // stats are always positive numbers + out.writeVLong(statistics.docCount()); + out.writeVLong(statistics.sumTotalTermFreq()); + out.writeVLong(statistics.sumDocFreq()); + } else { + out.writeVLong(addOne(statistics.docCount())); + out.writeVLong(addOne(statistics.sumTotalTermFreq())); + out.writeVLong(addOne(statistics.sumDocFreq())); + } } } @@ -138,9 +146,14 @@ public static void writeTermStats(StreamOutput out, TermStatistics[] termStatist } public static void writeSingleTermStats(StreamOutput out, TermStatistics termStatistic) throws IOException { - assert termStatistic.docFreq() >= 0; - out.writeVLong(termStatistic.docFreq()); - out.writeVLong(addOne(termStatistic.totalTermFreq())); + if (termStatistic != null) { + assert termStatistic.docFreq() > 0; + out.writeVLong(termStatistic.docFreq()); + out.writeVLong(addOne(termStatistic.totalTermFreq())); + } else { + out.writeVLong(0); + out.writeVLong(0); + } } public static ObjectObjectHashMap readFieldStats(StreamInput in) throws IOException { @@ -156,9 +169,19 @@ public static ObjectObjectHashMap readFieldStats(S final String field = in.readString(); assert field != null; final long maxDoc = in.readVLong(); - final long docCount = subOne(in.readVLong()); - final long sumTotalTermFreq = subOne(in.readVLong()); - final long sumDocFreq = subOne(in.readVLong()); + final long docCount; + final long sumTotalTermFreq; + final long sumDocFreq; + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + // stats are always positive numbers + docCount = in.readVLong(); + sumTotalTermFreq = in.readVLong(); + sumDocFreq = in.readVLong(); + } else { + docCount = subOne(in.readVLong()); + sumTotalTermFreq = subOne(in.readVLong()); + sumDocFreq = 
subOne(in.readVLong()); + } CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq); fieldStatistics.put(field, stats); } @@ -178,6 +201,9 @@ public static TermStatistics[] readTermStats(StreamInput in, Term[] terms) throw final long docFreq = in.readVLong(); assert docFreq >= 0; final long totalTermFreq = subOne(in.readVLong()); + if (docFreq == 0) { + continue; + } termStatistics[i] = new TermStatistics(term, docFreq, totalTermFreq); } } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index f745ee1163c16..69ac90496864b 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -23,7 +23,10 @@ import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitSet; import org.elasticsearch.ExceptionsHelper; @@ -168,7 +171,9 @@ public void execute(SearchContext context) { } } - context.fetchResult().hits(new SearchHits(hits, context.queryResult().getTotalHits(), context.queryResult().getMaxScore())); + TotalHits totalHits = context.queryResult().getTotalHits(); + long totalHitsAsLong = totalHits.relation == Relation.EQUAL_TO ? totalHits.value : -1; + context.fetchResult().hits(new SearchHits(hits, totalHitsAsLong, context.queryResult().getMaxScore())); } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } @@ -357,7 +362,8 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context current = nestedParentObjectMapper; continue; } - final Weight childWeight = context.searcher().createNormalizedWeight(childFilter, false); + final Weight childWeight = context.searcher() + .createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer childScorer = childWeight.scorer(subReaderContext); if (childScorer == null) { current = nestedParentObjectMapper; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index d3b1da7c9376e..48f2f1299c2ea 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -25,11 +25,13 @@ import org.apache.lucene.search.ConjunctionDISI; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.SearchContext; @@ -87,7 +89,7 @@ protected InnerHitSubContext(String name, SearchContext context) { this.context = context; } - public abstract TopDocs[] topDocs(SearchHit[] hits) throws IOException; + public abstract 
TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException; public String getName() { return name; @@ -104,7 +106,8 @@ public void setChildInnerHits(Map childInnerHits) { protected Weight createInnerHitQueryWeight() throws IOException { final boolean needsScores = size() != 0 && (sort() == null || sort().sort.needsScores()); - return context.searcher().createNormalizedWeight(query(), needsScores); + return context.searcher().createWeight(context.searcher().rewrite(query()), + needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1f); } public SearchContext parentSearchContext() { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java index 75d6211aca4bf..4d34a3afa620f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TopDocs; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -50,19 +50,19 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { InnerHitsContext.InnerHitSubContext innerHits = entry.getValue(); - TopDocs[] topDocs = innerHits.topDocs(hits); + TopDocsAndMaxScore[] topDocs = innerHits.topDocs(hits); for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; - TopDocs topDoc = topDocs[i]; + TopDocsAndMaxScore topDoc = topDocs[i]; Map results = hit.getInnerHits(); if (results == null) { hit.setInnerHits(results = new HashMap<>()); } innerHits.queryResult().topDocs(topDoc, innerHits.sort() == null ? 
null : innerHits.sort().formats); - int[] docIdsToLoad = new int[topDoc.scoreDocs.length]; - for (int j = 0; j < topDoc.scoreDocs.length; j++) { - docIdsToLoad[j] = topDoc.scoreDocs[j].doc; + int[] docIdsToLoad = new int[topDoc.topDocs.scoreDocs.length]; + for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) { + docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc; } innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); innerHits.setUid(new Uid(hit.getType(), hit.getId())); @@ -70,7 +70,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept FetchSearchResult fetchResult = innerHits.fetchResult(); SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits(); for (int j = 0; j < internalHits.length; j++) { - ScoreDoc scoreDoc = topDoc.scoreDocs[j]; + ScoreDoc scoreDoc = topDoc.topDocs.scoreDocs[j]; SearchHit searchHitFields = internalHits[j]; searchHitFields.score(scoreDoc.score); if (scoreDoc instanceof FieldDoc) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java index c28e07ff45526..c2f6980781dba 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/MatchedQueriesFetchSubPhase.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; @@ -67,7 +68,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) { Query query = entry.getValue(); int readerIndex = -1; int docBase = -1; - Weight weight = context.searcher().createNormalizedWeight(query, false); + Weight weight = context.searcher().createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); Bits matchingDocs = null; final IndexReader indexReader = context.searcher().getIndexReader(); for (int i = 0; i < hits.length; ++i) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java new file mode 100644 index 0000000000000..3a6db72d5b31a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ScoreFetchSubPhase.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.fetch.subphase; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; + +public class ScoreFetchSubPhase implements FetchSubPhase { + + @Override + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { + if (context.trackScores() == false || hits.length == 0 || + // scores were already computed since they are needed on the coordinated node to merge top hits + context.sort() == null) { + return; + } + + hits = hits.clone(); // don't modify the incoming hits + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + + final IndexSearcher searcher = context.searcher(); + final Weight weight = searcher.createWeight(searcher.rewrite(context.query()), ScoreMode.COMPLETE, 1); + Iterator leafContextIterator = searcher.getIndexReader().leaves().iterator(); + LeafReaderContext leafContext = null; + Scorer scorer = null; + for (SearchHit hit : hits) { + if (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId()) { + do { + leafContext = leafContextIterator.next(); + } while (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId()); + ScorerSupplier scorerSupplier = weight.scorerSupplier(leafContext); + if (scorerSupplier == null) { + throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query"); + } + scorer = scorerSupplier.get(1L); // random-access + } + + final int leafDocID = hit.docId() - leafContext.docBase; + assert leafDocID >= 0 && leafDocID < leafContext.reader().maxDoc(); + int advanced = scorer.iterator().advance(leafDocID); + if (advanced != leafDocID) { + throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query"); + } + hit.score(scorer.score()); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index a7eb0a953ba58..04a4629e9a875 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -22,7 +22,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; -import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermStates; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.Collector; @@ -31,6 +31,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.Weight; @@ -71,7 +72,7 @@ public ContextIndexSearcher(Engine.Searcher searcher, super(searcher.reader()); in = searcher.searcher(); 
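The new ScoreFetchSubPhase above recovers scores for hits that were collected without them by advancing a fresh scorer inside each hit's segment. A condensed sketch of that advance-and-score pattern, assuming the given docID is known to match the query (names are illustrative):

```java
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

import java.io.IOException;

class LazyScoreSketch {
    // Find the segment that holds the index-wide docID, advance a scorer to the
    // segment-local doc, and read the score; ScoreMode.COMPLETE requests real scores.
    static float scoreOf(IndexSearcher searcher, Query query, int docId) throws IOException {
        Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1f);
        for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
            if (docId < leaf.docBase + leaf.reader().maxDoc()) {
                Scorer scorer = weight.scorer(leaf);
                int target = docId - leaf.docBase;
                if (scorer != null && scorer.iterator().advance(target) == target) {
                    return scorer.score();
                }
                break; // the doc lives in this segment but does not match
            }
        }
        throw new IllegalStateException("doc " + docId + " does not match the query");
    }
}
```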
engineSearcher = searcher; - setSimilarity(searcher.searcher().getSimilarity(true)); + setSimilarity(searcher.searcher().getSimilarity()); setQueryCache(queryCache); setQueryCachingPolicy(queryCachingPolicy); } @@ -112,22 +113,7 @@ public Query rewrite(Query original) throws IOException { } @Override - public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { - // During tests we prefer to use the wrapped IndexSearcher, because then we use the AssertingIndexSearcher - // it is hacky, because if we perform a dfs search, we don't use the wrapped IndexSearcher... - if (aggregatedDfs != null && needsScores) { - // if scores are needed and we have dfs data then use it - return super.createNormalizedWeight(query, needsScores); - } else if (profiler != null) { - // we need to use the createWeight method to insert the wrappers - return super.createNormalizedWeight(query, needsScores); - } else { - return in.createNormalizedWeight(query, needsScores); - } - } - - @Override - public Weight createWeight(Query query, boolean needsScores, float boost) throws IOException { + public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws IOException { if (profiler != null) { // createWeight() is called for each query in the tree, so we tell the queryProfiler // each invocation so that it can build an internal representation of the query @@ -137,7 +123,7 @@ public Weight createWeight(Query query, boolean needsScores, float boost) throws timer.start(); final Weight weight; try { - weight = super.createWeight(query, needsScores, boost); + weight = super.createWeight(query, scoreMode, boost); } finally { timer.stop(); profiler.pollLastElement(); @@ -145,7 +131,7 @@ public Weight createWeight(Query query, boolean needsScores, float boost) throws return new ProfileWeight(query, weight, profile); } else { // needs to be 'super', not 'in' in order to use aggregated DFS - return super.createWeight(query, needsScores, boost); + return super.createWeight(query, scoreMode, boost); } } @@ -195,13 +181,13 @@ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { public Explanation explain(Query query, int doc) throws IOException { if (aggregatedDfs != null) { // dfs data is needed to explain the score - return super.explain(createNormalizedWeight(query, true), doc); + return super.explain(createWeight(rewrite(query), ScoreMode.COMPLETE, 1f), doc); } return in.explain(query, doc); } @Override - public TermStatistics termStatistics(Term term, TermContext context) throws IOException { + public TermStatistics termStatistics(Term term, TermStates context) throws IOException { if (aggregatedDfs == null) { // we are either executing the dfs phase or the search_type doesn't include the dfs phase. 
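TermContext is now TermStates, and in Lucene 8 a term that matches no documents yields a null TermStatistics, which is why the dfs serialization code above starts guarding against null. A small sketch of gathering per-term statistics under the renamed API (helper names are illustrative):

```java
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermStates;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermStatistics;

import java.io.IOException;

class TermStatsSketch {
    static TermStatistics statsFor(IndexSearcher searcher, Term term) throws IOException {
        IndexReaderContext top = searcher.getTopReaderContext();
        TermStates states = TermStates.build(top, term, true); // true: also gather statistics
        return searcher.termStatistics(term, states);          // null when the term matches nothing
    }
}
```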
return super.termStatistics(term, context); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java b/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java index 75d48d5d63798..41d7680a780b0 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ScrollContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.internal; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.search.Scroll; import java.util.HashMap; @@ -30,8 +31,8 @@ public final class ScrollContext { private Map context = null; - public long totalHits = -1; - public float maxScore; + public TotalHits totalHits = null; + public float maxScore = Float.NaN; public ScoreDoc lastEmittedDoc; public Scroll scroll; diff --git a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java index 0cdeb458a3031..16388fa789aff 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.profile.aggregation; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -45,8 +46,8 @@ public void close() { } @Override - public boolean needsScores() { - return delegate.needsScores(); + public ScoreMode scoreMode() { + return delegate.scoreMode(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java index e892abaab2249..993d91ab7a18c 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/InternalProfileCollector.java @@ -22,6 +22,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import java.io.IOException; import java.util.ArrayList; @@ -116,8 +117,8 @@ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOExcept } @Override - public boolean needsScores() { - return collector.needsScores(); + public ScoreMode scoreMode() { + return collector.scoreMode(); } public CollectorResult getCollectorTree() { diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java index ea8dbb2f335ca..940e3902954b5 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileCollector.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.FilterCollector; import org.apache.lucene.search.FilterLeafCollector; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import java.io.IOException; @@ -44,10 +45,10 @@ public 
Collector getDelegate() { } @Override - public boolean needsScores() { + public ScoreMode scoreMode() { final long start = System.nanoTime(); try { - return super.needsScores(); + return super.scoreMode(); } finally { time += Math.max(1, System.nanoTime() - start); } diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java index 66e0e0fe77cfe..8913f484847e6 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/ProfileScorer.java @@ -36,7 +36,7 @@ final class ProfileScorer extends Scorer { private final Scorer scorer; private ProfileWeight profileWeight; - private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer; + private final Timer scoreTimer, nextDocTimer, advanceTimer, matchTimer, shallowAdvanceTimer, computeMaxScoreTimer; ProfileScorer(ProfileWeight w, Scorer scorer, QueryProfileBreakdown profile) throws IOException { super(w); @@ -46,6 +46,8 @@ final class ProfileScorer extends Scorer { nextDocTimer = profile.getTimer(QueryTimingType.NEXT_DOC); advanceTimer = profile.getTimer(QueryTimingType.ADVANCE); matchTimer = profile.getTimer(QueryTimingType.MATCH); + shallowAdvanceTimer = profile.getTimer(QueryTimingType.SHALLOW_ADVANCE); + computeMaxScoreTimer = profile.getTimer(QueryTimingType.COMPUTE_MAX_SCORE); } @Override @@ -166,4 +168,24 @@ public float matchCost() { } }; } + + @Override + public int advanceShallow(int target) throws IOException { + shallowAdvanceTimer.start(); + try { + return scorer.advanceShallow(target); + } finally { + shallowAdvanceTimer.stop(); + } + } + + @Override + public float getMaxScore(int upTo) throws IOException { + computeMaxScoreTimer.start(); + try { + return scorer.getMaxScore(upTo); + } finally { + computeMaxScoreTimer.stop(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java index 5f194a7d5f10d..146bd8f07bcd1 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java +++ b/server/src/main/java/org/elasticsearch/search/profile/query/QueryTimingType.java @@ -27,7 +27,9 @@ public enum QueryTimingType { NEXT_DOC, ADVANCE, MATCH, - SCORE; + SCORE, + SHALLOW_ADVANCE, + COMPUTE_MAX_SCORE; @Override public String toString() { diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java index ff80dda77fb6d..f0c94bd822edf 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryCollectorContext.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.common.lucene.MinimumScoreCollector; import org.elasticsearch.common.lucene.search.FilteredCollector; @@ -114,7 +115,7 @@ static QueryCollectorContext createFilteredCollectorContext(IndexSearcher search return new QueryCollectorContext(REASON_SEARCH_POST_FILTER) { @Override Collector create(Collector in ) throws IOException { - final Weight filterWeight = searcher.createNormalizedWeight(query, false); + 
final Weight filterWeight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); return new FilteredCollector(in, filterWeight); } }; diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 84c76e85f3dd0..e4f0aa6898ad8 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.EarlyTerminatingSortingCollector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; @@ -35,9 +34,11 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor; @@ -94,8 +95,8 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep if (searchContext.hasOnlySuggest()) { suggestPhase.execute(searchContext); // TODO: fix this once we can fetch docs for suggestions - searchContext.queryResult().topDocs( - new TopDocs(0, Lucene.EMPTY_SCORE_DOCS, Float.NaN), + searchContext.queryResult().topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), new DocValueFormat[0]); return; } @@ -138,7 +139,7 @@ static boolean execute(SearchContext searchContext, final ScrollContext scrollContext = searchContext.scrollContext(); if (scrollContext != null) { - if (scrollContext.totalHits == -1) { + if (scrollContext.totalHits == null) { // first round assert scrollContext.lastEmittedDoc == null; // there is not much that we can optimize here since we want to collect all @@ -268,7 +269,7 @@ static boolean execute(SearchContext searchContext, queryResult.terminatedEarly(true); } catch (TimeExceededException e) { assert timeoutSet : "TimeExceededException thrown even though timeout wasn't set"; - + if (searchContext.request().allowPartialSearchResults() == false) { // Can't rethrow TimeExceededException because not serializable throw new QueryPhaseExecutionException(searchContext, "Time exceeded"); @@ -327,7 +328,7 @@ static boolean canEarlyTerminate(IndexReader reader, SortAndFormats sortAndForma final Sort sort = sortAndFormats.sort; for (LeafReaderContext ctx : reader.leaves()) { Sort indexSort = ctx.reader().getMetaData().getSort(); - if (indexSort == null || EarlyTerminatingSortingCollector.canEarlyTerminate(sort, indexSort) == false) { + if (indexSort == null || Lucene.canEarlyTerminate(sort, indexSort) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 83c43d10172c2..2aded57ece04c 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ 
b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,10 +20,11 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -46,7 +47,10 @@ public final class QuerySearchResult extends SearchPhaseResult { private int from; private int size; - private TopDocs topDocs; + private TopDocsAndMaxScore topDocsAndMaxScore; + private boolean hasScoreDocs; + private TotalHits totalHits; + private float maxScore = Float.NaN; private DocValueFormat[] sortValueFormats; private InternalAggregations aggregations; private boolean hasAggs; @@ -56,9 +60,6 @@ public final class QuerySearchResult extends SearchPhaseResult { private Boolean terminatedEarly = null; private ProfileShardResult profileShardResults; private boolean hasProfileResults; - private boolean hasScoreDocs; - private long totalHits; - private float maxScore; private long serviceTimeEWMA = -1; private int nodeQueueSize = -1; @@ -92,37 +93,37 @@ public Boolean terminatedEarly() { return this.terminatedEarly; } - public TopDocs topDocs() { - if (topDocs == null) { + public TopDocsAndMaxScore topDocs() { + if (topDocsAndMaxScore == null) { throw new IllegalStateException("topDocs already consumed"); } - return topDocs; + return topDocsAndMaxScore; } /** * Returns true iff the top docs have already been consumed. */ public boolean hasConsumedTopDocs() { - return topDocs == null; + return topDocsAndMaxScore == null; } /** * Returns and nulls out the top docs for this search results. This allows to free up memory once the top docs are consumed. * @throws IllegalStateException if the top docs have already been consumed. 
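QuerySearchResult now carries the two pieces Lucene 8 pulled apart: TotalHits, which pairs a count with a relation because hit counting may terminate early, and a max score kept in Elasticsearch's own TopDocsAndMaxScore wrapper (TopDocs itself no longer stores one). A sketch of reading them back, using the wrapper's public fields as this patch does (the helper name is illustrative):

```java
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;

class TotalHitsSketch {
    static String describe(TopDocsAndMaxScore result) {
        TotalHits totalHits = result.topDocs.totalHits;
        // GREATER_THAN_OR_EQUAL_TO means collection stopped early and the
        // value is only a lower bound on the real hit count.
        String rel = totalHits.relation == TotalHits.Relation.EQUAL_TO ? "exactly" : "at least";
        return rel + " " + totalHits.value + " hits, max score " + result.maxScore;
    }
}
```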
*/ - public TopDocs consumeTopDocs() { - TopDocs topDocs = this.topDocs; - if (topDocs == null) { + public TopDocsAndMaxScore consumeTopDocs() { + TopDocsAndMaxScore topDocsAndMaxScore = this.topDocsAndMaxScore; + if (topDocsAndMaxScore == null) { throw new IllegalStateException("topDocs already consumed"); } - this.topDocs = null; - return topDocs; + this.topDocsAndMaxScore = null; + return topDocsAndMaxScore; } - public void topDocs(TopDocs topDocs, DocValueFormat[] sortValueFormats) { + public void topDocs(TopDocsAndMaxScore topDocs, DocValueFormat[] sortValueFormats) { setTopDocs(topDocs); - if (topDocs.scoreDocs.length > 0 && topDocs.scoreDocs[0] instanceof FieldDoc) { - int numFields = ((FieldDoc) topDocs.scoreDocs[0]).fields.length; + if (topDocs.topDocs.scoreDocs.length > 0 && topDocs.topDocs.scoreDocs[0] instanceof FieldDoc) { + int numFields = ((FieldDoc) topDocs.topDocs.scoreDocs[0]).fields.length; if (numFields != sortValueFormats.length) { throw new IllegalArgumentException("The number of sort fields does not match: " + numFields + " != " + sortValueFormats.length); @@ -131,11 +132,11 @@ public void topDocs(TopDocs topDocs, DocValueFormat[] sortValueFormats) { this.sortValueFormats = sortValueFormats; } - private void setTopDocs(TopDocs topDocs) { - this.topDocs = topDocs; - hasScoreDocs = topDocs.scoreDocs.length > 0; - this.totalHits = topDocs.totalHits; - this.maxScore = topDocs.getMaxScore(); + private void setTopDocs(TopDocsAndMaxScore topDocsAndMaxScore) { + this.topDocsAndMaxScore = topDocsAndMaxScore; + this.totalHits = topDocsAndMaxScore.topDocs.totalHits; + this.maxScore = topDocsAndMaxScore.maxScore; + this.hasScoreDocs = topDocsAndMaxScore.topDocs.scoreDocs.length > 0; } public DocValueFormat[] sortValueFormats() { @@ -326,7 +327,7 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeNamedWriteable(sortValueFormats[i]); } } - writeTopDocs(out, topDocs); + writeTopDocs(out, topDocsAndMaxScore); if (aggregations == null) { out.writeBoolean(false); } else { @@ -349,7 +350,7 @@ public void writeToNoId(StreamOutput out) throws IOException { } } - public long getTotalHits() { + public TotalHits getTotalHits() { return totalHits; } diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 8d40cc802fffd..d1b115ff68006 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -37,9 +37,14 @@ import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; +import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.internal.ScrollContext; @@ -49,7 +54,6 @@ import java.io.IOException; import java.util.Objects; -import java.util.function.IntSupplier; import java.util.function.Supplier; import static 
org.elasticsearch.search.profile.query.CollectorResult.REASON_SEARCH_COUNT; @@ -82,7 +86,7 @@ boolean shouldRescore() { static class EmptyTopDocsCollectorContext extends TopDocsCollectorContext { private final Collector collector; - private final IntSupplier hitCountSupplier; + private final Supplier hitCountSupplier; /** * Ctr @@ -100,15 +104,15 @@ private EmptyTopDocsCollectorContext(IndexReader reader, Query query, int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query); if (hitCount == -1) { this.collector = hitCountCollector; - this.hitCountSupplier = hitCountCollector::getTotalHits; + this.hitCountSupplier = () -> new TotalHits(hitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } else { this.collector = new EarlyTerminatingCollector(hitCountCollector, 0, false); - this.hitCountSupplier = () -> hitCount; + this.hitCountSupplier = () -> new TotalHits(hitCount, TotalHits.Relation.EQUAL_TO); } } else { this.collector = new EarlyTerminatingCollector(new TotalHitCountCollector(), 0, false); // for bwc hit count is set to 0, it will be converted to -1 by the coordinating node - this.hitCountSupplier = () -> 0; + this.hitCountSupplier = () -> new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO); } } @@ -119,14 +123,15 @@ Collector create(Collector in) { @Override void postProcess(QuerySearchResult result) { - final int totalHitCount = hitCountSupplier.getAsInt(); - result.topDocs(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS, Float.NaN), null); + final TotalHits totalHitCount = hitCountSupplier.get(); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(totalHitCount, Lucene.EMPTY_SCORE_DOCS), Float.NaN), null); } } static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext { private final DocValueFormat[] sortFmt; private final CollapsingTopDocsCollector topDocsCollector; + private final Supplier maxScoreSupplier; /** * Ctr @@ -144,7 +149,15 @@ private CollapsingTopDocsCollectorContext(CollapseContext collapseContext, assert collapseContext != null; Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort; this.sortFmt = sortAndFormats == null ? 
new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats; - this.topDocsCollector = collapseContext.createTopDocs(sort, numHits, trackMaxScore); + this.topDocsCollector = collapseContext.createTopDocs(sort, numHits); + + MaxScoreCollector maxScoreCollector = null; + if (trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + maxScoreSupplier = maxScoreCollector::getMaxScore; + } else { + maxScoreSupplier = () -> Float.NaN; + } } @Override @@ -155,15 +168,17 @@ Collector create(Collector in) throws IOException { @Override void postProcess(QuerySearchResult result) throws IOException { - result.topDocs(topDocsCollector.getTopDocs(), sortFmt); + CollapseTopFieldDocs topDocs = topDocsCollector.getTopDocs(); + result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortFmt); } } abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext { private final @Nullable SortAndFormats sortAndFormats; private final Collector collector; - private final IntSupplier totalHitsSupplier; + private final Supplier totalHitsSupplier; private final Supplier topDocsSupplier; + private final Supplier maxScoreSupplier; /** * Ctr @@ -187,37 +202,52 @@ private SimpleTopDocsCollectorContext(IndexReader reader, super(REASON_SEARCH_TOP_HITS, numHits); this.sortAndFormats = sortAndFormats; if (sortAndFormats == null) { - final TopDocsCollector topDocsCollector = TopScoreDocCollector.create(numHits, searchAfter); + final TopDocsCollector topDocsCollector = TopScoreDocCollector.create(numHits, searchAfter, Integer.MAX_VALUE); this.collector = topDocsCollector; - this.topDocsSupplier = topDocsCollector::topDocs; - this.totalHitsSupplier = topDocsCollector::getTotalHits; + this.topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); + this.totalHitsSupplier = () -> topDocsSupplier.get().totalHits; + this.maxScoreSupplier = () -> { + TopDocs topDocs = topDocsSupplier.get(); + if (topDocs.scoreDocs.length == 0) { + return Float.NaN; + } else { + return topDocs.scoreDocs[0].score; + } + }; } else { /** * We explicitly don't track total hits in the topdocs collector, it can early terminate * if the sort matches the index sort. */ final TopDocsCollector topDocsCollector = TopFieldCollector.create(sortAndFormats.sort, numHits, - (FieldDoc) searchAfter, true, trackMaxScore, trackMaxScore, false); - this.topDocsSupplier = topDocsCollector::topDocs; + (FieldDoc) searchAfter, 1); + this.topDocsSupplier = new CachedSupplier<>(topDocsCollector::topDocs); + TotalHitCountCollector hitCountCollector = null; if (trackTotalHits) { // implicit total hit counts are valid only when there is no filter collector in the chain int count = hasFilterCollector ? 
-1 : shortcutTotalHitCount(reader, query); if (count != -1) { // we can extract the total count from the shard statistics directly - this.totalHitsSupplier = () -> count; - this.collector = topDocsCollector; + this.totalHitsSupplier = () -> new TotalHits(count, TotalHits.Relation.EQUAL_TO); } else { // wrap a collector that counts the total number of hits even // if the top docs collector terminates early final TotalHitCountCollector countingCollector = new TotalHitCountCollector(); - this.collector = MultiCollector.wrap(topDocsCollector, countingCollector); - this.totalHitsSupplier = countingCollector::getTotalHits; + hitCountCollector = countingCollector; + this.totalHitsSupplier = () -> new TotalHits(countingCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO); } } else { // total hit count is not needed - this.collector = topDocsCollector; - this.totalHitsSupplier = topDocsCollector::getTotalHits; + this.totalHitsSupplier = () -> topDocsSupplier.get().totalHits; + } + MaxScoreCollector maxScoreCollector = null; + if (trackMaxScore) { + maxScoreCollector = new MaxScoreCollector(); + maxScoreSupplier = maxScoreCollector::getMaxScore; + } else { + maxScoreSupplier = () -> Float.NaN; } + collector = MultiCollector.wrap(topDocsCollector, hitCountCollector, maxScoreCollector); } } @@ -230,8 +260,8 @@ Collector create(Collector in) { @Override void postProcess(QuerySearchResult result) throws IOException { final TopDocs topDocs = topDocsSupplier.get(); - topDocs.totalHits = totalHitsSupplier.getAsInt(); - result.topDocs(topDocs, sortAndFormats == null ? null : sortAndFormats.formats); + topDocs.totalHits = totalHitsSupplier.get(); + result.topDocs(new TopDocsAndMaxScore(topDocs, maxScoreSupplier.get()), sortAndFormats == null ? null : sortAndFormats.formats); } } @@ -257,22 +287,22 @@ private ScrollingTopDocsCollectorContext(IndexReader reader, @Override void postProcess(QuerySearchResult result) throws IOException { super.postProcess(result); - final TopDocs topDocs = result.topDocs(); - if (scrollContext.totalHits == -1) { + final TopDocsAndMaxScore topDocs = result.topDocs(); + if (scrollContext.totalHits == null) { // first round - scrollContext.totalHits = topDocs.totalHits; - scrollContext.maxScore = topDocs.getMaxScore(); + scrollContext.totalHits = topDocs.topDocs.totalHits; + scrollContext.maxScore = topDocs.maxScore; } else { // subsequent round: the total number of hits and // the maximum score were computed on the first round - topDocs.totalHits = scrollContext.totalHits; - topDocs.setMaxScore(scrollContext.maxScore); + topDocs.topDocs.totalHits = scrollContext.totalHits; + topDocs.maxScore = scrollContext.maxScore; } if (numberOfShards == 1) { // if we fetch the document in the same roundtrip, we already know the last emitted doc - if (topDocs.scoreDocs.length > 0) { + if (topDocs.topDocs.scoreDocs.length > 0) { // set the last emitted doc - scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1]; + scrollContext.lastEmittedDoc = topDocs.topDocs.scoreDocs[topDocs.topDocs.scoreDocs.length - 1]; } } result.topDocs(topDocs, result.sortValueFormats()); @@ -334,8 +364,7 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc } else if (searchContext.collapse() != null) { boolean trackScores = searchContext.sort() == null ? 
true : searchContext.trackScores(); int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); - return new CollapsingTopDocsCollectorContext(searchContext.collapse(), - searchContext.sort(), numDocs, trackScores); + return new CollapsingTopDocsCollectorContext(searchContext.collapse(), searchContext.sort(), numDocs, trackScores); } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index 4a9567a32c06a..61bd150291d9f 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TopDocs; import java.io.IOException; @@ -41,7 +42,7 @@ public final class QueryRescorer implements Rescorer { public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext rescoreContext) throws IOException { assert rescoreContext != null; - if (topDocs == null || topDocs.totalHits == 0 || topDocs.scoreDocs.length == 0) { + if (topDocs == null || topDocs.scoreDocs.length == 0) { return topDocs; } @@ -87,7 +88,7 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon Explanation prim; if (sourceExplanation.isMatch()) { prim = Explanation.match( - sourceExplanation.getValue() * primaryWeight, + sourceExplanation.getValue().floatValue() * primaryWeight, "product of:", sourceExplanation, Explanation.match(primaryWeight, "primaryWeight")); } else { prim = Explanation.noMatch("First pass did not match", sourceExplanation); @@ -99,12 +100,12 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon if (rescoreExplain != null && rescoreExplain.isMatch()) { float secondaryWeight = rescore.rescoreQueryWeight(); Explanation sec = Explanation.match( - rescoreExplain.getValue() * secondaryWeight, + rescoreExplain.getValue().floatValue() * secondaryWeight, "product of:", rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight")); QueryRescoreMode scoreMode = rescore.scoreMode(); return Explanation.match( - scoreMode.combine(prim.getValue(), sec.getValue()), + scoreMode.combine(prim.getValue().floatValue(), sec.getValue().floatValue()), scoreMode + " of:", prim, sec); } @@ -123,15 +124,14 @@ public int compare(ScoreDoc o1, ScoreDoc o2) { /** Returns a new {@link TopDocs} with the topN from the incoming one, or the same TopDocs if the number of hits is already <= * topN. */ private TopDocs topN(TopDocs in, int topN) { - if (in.totalHits < topN) { - assert in.scoreDocs.length == in.totalHits; + if (in.scoreDocs.length < topN) { return in; } ScoreDoc[] subset = new ScoreDoc[topN]; System.arraycopy(in.scoreDocs, 0, subset, 0, topN); - return new TopDocs(in.totalHits, subset, in.getMaxScore()); + return new TopDocs(in.totalHits, subset); } /** Modifies incoming TopDocs (in) by replacing the top hits with resorted's hits, and then resorting all hits. 
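The floatValue() calls threaded through the rescorer above stem from Lucene 8 changing Explanation#getValue() to return a Number rather than a float. A sketch of the conversion pattern used when combining weighted explanations (the helper is illustrative):

```java
import org.apache.lucene.search.Explanation;

class ExplanationSketch {
    // getValue() is a Number in Lucene 8, so an explicit conversion is needed
    // before doing float arithmetic on an explanation's score.
    static Explanation weighted(Explanation source, float weight) {
        float scaled = source.getValue().floatValue() * weight;
        return Explanation.match(scaled, "product of:",
                source, Explanation.match(weight, "weight"));
    }
}
```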
*/ @@ -151,8 +151,6 @@ private TopDocs combine(TopDocs in, TopDocs resorted, QueryRescoreContext ctx) { // incoming first pass hits, instead of allowing recoring of just the top subset: Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR); } - // update the max score after the resort - in.setMaxScore(in.scoreDocs[0].score); return in; } @@ -206,7 +204,8 @@ public void setScoreMode(String scoreMode) { @Override public void extractTerms(IndexSearcher searcher, RescoreContext rescoreContext, Set termsSet) throws IOException { - searcher.createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet); + Query query = ((QueryRescoreContext) rescoreContext).query(); + searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f).extractTerms(termsSet); } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 7baaa61bbb8c5..7f5a1be285d8e 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.internal.SearchContext; @@ -44,15 +45,19 @@ public void preProcess(SearchContext context) { @Override public void execute(SearchContext context) { + TopDocs topDocs = context.queryResult().topDocs().topDocs; + if (topDocs.scoreDocs.length == 0) { + return; + } try { - TopDocs topDocs = context.queryResult().topDocs(); for (RescoreContext ctx : context.rescore()) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); // It is the responsibility of the rescorer to sort the resulted top docs, // here we only assert that this condition is met. 
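The slice queries below pick up the new Weight-creation signature on Query, where the needsScores flag becomes an explicit ScoreMode. A deliberately trivial custom query written against the Lucene 8 API, to show the shape of the override (illustrative only, not part of this patch):

```java
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Matches nothing; exists only to show createWeight(IndexSearcher, ScoreMode, float).
public final class MatchNothingQuery extends Query {
    @Override
    public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
        return new ConstantScoreWeight(this, boost) {
            @Override
            public Scorer scorer(LeafReaderContext context) {
                return null; // no matches in any segment
            }
            @Override
            public boolean isCacheable(LeafReaderContext ctx) {
                return true; // independent of deletes and doc values
            }
        };
    }
    @Override
    public String toString(String field) {
        return "MatchNothingQuery";
    }
    @Override
    public boolean equals(Object other) {
        return sameClassAs(other);
    }
    @Override
    public int hashCode() {
        return classHash();
    }
}
```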
assert context.sort() == null && topDocsSortedByScore(topDocs): "topdocs should be sorted after rescore"; } - context.queryResult().topDocs(topDocs, context.queryResult().sortValueFormats()); + context.queryResult().topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), + context.queryResult().sortValueFormats()); } catch (IOException e) { throw new ElasticsearchException("Rescore Phase Failed", e); } diff --git a/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java index c1aaad04d1d49..f2cf854947fd8 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/elasticsearch/search/slice/DocValuesSliceQuery.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.ConstantScoreWeight; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; @@ -45,7 +46,7 @@ public DocValuesSliceQuery(String field, int id, int max) { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override diff --git a/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java index da1b98822cf19..1a10770fe9d2b 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Weight; @@ -55,7 +56,7 @@ public TermsSliceQuery(String field, int id, int max) { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override public Scorer scorer(LeafReaderContext context) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 5690acd7abd97..7dc63a8daac78 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -84,7 +84,7 @@ protected Suggest.Suggestion suggestDocs = new ArrayList(size); + final List suggestDocs = new ArrayList<>(size); final CharArraySet seenSurfaceForms = doSkipDuplicates() ? 
new CharArraySet(size, false) : null; for (TopSuggestDocs.SuggestScoreDoc suggestEntry : entries.scoreLookupDocs()) { final SuggestDoc suggestDoc; @@ -209,8 +209,8 @@ public TopSuggestDocs get() throws IOException { } suggestDocs.add(suggestDoc); } - return new TopSuggestDocs((int) entries.totalHits, - suggestDocs.toArray(new TopSuggestDocs.SuggestScoreDoc[0]), entries.getMaxScore()); + return new TopSuggestDocs(entries.totalHits, + suggestDocs.toArray(new TopSuggestDocs.SuggestScoreDoc[0])); } } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java index 7b7584f4674cc..6fdff8d18eba0 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorBuilder.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; @@ -466,7 +466,7 @@ static StringDistance resolveDistance(String distanceVal) { } else if ("damerau_levenshtein".equals(distanceVal)) { return new LuceneLevenshteinDistance(); } else if ("levenshtein".equals(distanceVal)) { - return new LevensteinDistance(); + return new LevenshteinDistance(); } else if ("jaro_winkler".equals(distanceVal)) { return new JaroWinklerDistance(); } else if ("ngram".equals(distanceVal)) { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index fdc31dd6c2fca..ad6a8b4acf354 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.apache.lucene.search.spell.StringDistance; @@ -548,7 +548,7 @@ public StringDistance toLucene() { LEVENSHTEIN { @Override public StringDistance toLucene() { - return new LevensteinDistance(); + return new LevenshteinDistance(); } }, /** String distance algorithm based on Jaro-Winkler algorithm. 
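The long-misspelled LevensteinDistance class is finally LevenshteinDistance in Lucene 8; behavior is unchanged, so the renames in the suggesters above are mechanical. A minimal sketch of its use (the helper is illustrative):

```java
import org.apache.lucene.search.spell.LevenshteinDistance;

class DistanceSketch {
    static float similarity(String a, String b) {
        LevenshteinDistance ld = new LevenshteinDistance();
        return ld.getDistance(a, b); // 1.0f for identical strings, lower as they diverge
    }
}
```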
*/ diff --git a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java index bce5965e50b6b..50c80b8e4350d 100644 --- a/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java +++ b/server/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java @@ -28,23 +28,26 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.CheckHits; import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.Weight; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.apache.lucene.search.grouping.CollapsingTopDocsCollector; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -54,7 +57,6 @@ import java.util.List; import java.util.Set; -import static org.hamcrest.core.IsEqual.equalTo; public class CollapsingTopDocsCollectorTests extends ESTestCase { private static class SegmentSearcher extends IndexSearcher { @@ -84,15 +86,12 @@ interface CollapsingDocValuesProducer> { } > void assertSearchCollapse(CollapsingDocValuesProducer dvProducers, boolean numeric) throws IOException { - assertSearchCollapse(dvProducers, numeric, true, true); - assertSearchCollapse(dvProducers, numeric, true, false); - assertSearchCollapse(dvProducers, numeric, false, true); - assertSearchCollapse(dvProducers, numeric, false, false); + assertSearchCollapse(dvProducers, numeric, true); + assertSearchCollapse(dvProducers, numeric, false); } private > void assertSearchCollapse(CollapsingDocValuesProducer dvProducers, - boolean numeric, boolean multivalued, - boolean trackMaxScores) throws IOException { + boolean numeric, boolean multivalued) throws IOException { final int numDocs = randomIntBetween(1000, 2000); int maxGroup = randomIntBetween(2, 500); final Directory dir = newDirectory(); @@ -123,29 +122,25 @@ private > void assertSearchCollapse(CollapsingDocValuesP final CollapsingTopDocsCollector collapsingCollector; if (numeric) { collapsingCollector = - CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups); } else { collapsingCollector = - CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups); } TopFieldCollector topFieldCollector = - TopFieldCollector.create(sort, totalHits, true, trackMaxScores, trackMaxScores, true); - - searcher.search(new MatchAllDocsQuery(), collapsingCollector); - 
searcher.search(new MatchAllDocsQuery(), topFieldCollector); + TopFieldCollector.create(sort, totalHits, Integer.MAX_VALUE); + Query query = new MatchAllDocsQuery(); + searcher.search(query, collapsingCollector); + searcher.search(query, topFieldCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); TopFieldDocs topDocs = topFieldCollector.topDocs(); assertEquals(collapseField.getField(), collapseTopFieldDocs.field); assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length); - assertEquals(totalHits, collapseTopFieldDocs.totalHits); + assertEquals(totalHits, collapseTopFieldDocs.totalHits.value); + assertEquals(TotalHits.Relation.EQUAL_TO, collapseTopFieldDocs.totalHits.relation); assertEquals(totalHits, topDocs.scoreDocs.length); - assertEquals(totalHits, topDocs.totalHits); - if (trackMaxScores) { - assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(topDocs.getMaxScore())); - } else { - assertThat(collapseTopFieldDocs.getMaxScore(), equalTo(Float.NaN)); - } + assertEquals(totalHits, topDocs.totalHits.value); Set seen = new HashSet<>(); // collapse field is the last sort @@ -170,7 +165,6 @@ private > void assertSearchCollapse(CollapsingDocValuesP assertTrue(seen.contains(fieldDoc.fields[collapseIndex])); } - // check merge final IndexReaderContext ctx = searcher.getTopReaderContext(); final SegmentSearcher[] subSearchers; @@ -196,27 +190,27 @@ private > void assertSearchCollapse(CollapsingDocValuesP } final CollapseTopFieldDocs[] shardHits = new CollapseTopFieldDocs[subSearchers.length]; - final Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), true); + final Weight weight = searcher.createWeight(searcher.rewrite(new MatchAllDocsQuery()), ScoreMode.COMPLETE, 1f); for (int shardIDX = 0; shardIDX < subSearchers.length; shardIDX++) { final SegmentSearcher subSearcher = subSearchers[shardIDX]; final CollapsingTopDocsCollector c; if (numeric) { - c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups); } else { - c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, trackMaxScores); + c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups); } subSearcher.search(weight, c); shardHits[shardIDX] = c.getTopDocs(); } CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits, true); - assertTopDocsEquals(mergedFieldDocs, collapseTopFieldDocs); + assertTopDocsEquals(query, mergedFieldDocs, collapseTopFieldDocs); w.close(); reader.close(); dir.close(); } - private static void assertTopDocsEquals(CollapseTopFieldDocs topDocs1, CollapseTopFieldDocs topDocs2) { - TestUtil.assertEquals(topDocs1, topDocs2); + private static void assertTopDocsEquals(Query query, CollapseTopFieldDocs topDocs1, CollapseTopFieldDocs topDocs2) { + CheckHits.checkEqual(query, topDocs1.scoreDocs, topDocs2.scoreDocs); assertArrayEquals(topDocs1.collapseValues, topDocs2.collapseValues); } @@ -384,7 +378,7 @@ public void testEmptyNumericSegment() throws Exception { sortField.setMissingValue(Long.MAX_VALUE); Sort sort = new Sort(sortField); final CollapsingTopDocsCollector collapsingCollector = - CollapsingTopDocsCollector.createNumeric("group", sort, 10, false); + CollapsingTopDocsCollector.createNumeric("group", sort, 10); searcher.search(new MatchAllDocsQuery(), 
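The collapse-collector hunk above shows the two Lucene 8 changes that drive most of this test churn: TopDocs.totalHits is now a TotalHits object carrying a value plus a relation, and TopFieldCollector.create takes a totalHitsThreshold instead of the old track-total-hits/track-max-score booleans. A minimal sketch of the new pattern (names are illustrative):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TotalHits;

class TotalHitsExample {
    // Collect the top 10 docs and an exact hit count; threshold = MAX_VALUE
    // disables the hit-count approximation that Lucene 8 allows.
    static long exactHitCount(IndexSearcher searcher, Sort sort) throws IOException {
        TopFieldCollector collector = TopFieldCollector.create(sort, 10, Integer.MAX_VALUE);
        searcher.search(new MatchAllDocsQuery(), collector);
        TopFieldDocs topDocs = collector.topDocs();
        assert topDocs.totalHits.relation == TotalHits.Relation.EQUAL_TO;
        return topDocs.totalHits.value; // was the primitive long topDocs.totalHits in 7.x
    }
}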
collapsingCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); assertEquals(4, collapseTopFieldDocs.scoreDocs.length); @@ -420,7 +414,7 @@ public void testEmptySortedSegment() throws Exception { final IndexSearcher searcher = newSearcher(reader); Sort sort = new Sort(new SortField("group", SortField.Type.STRING_VAL)); final CollapsingTopDocsCollector collapsingCollector = - CollapsingTopDocsCollector.createKeyword("group", sort, 10, false); + CollapsingTopDocsCollector.createKeyword("group", sort, 10); searcher.search(new MatchAllDocsQuery(), collapsingCollector); CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs(); assertEquals(4, collapseTopFieldDocs.scoreDocs.length); diff --git a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 8f96936e43b55..5b37b4bf48178 100644 --- a/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.similarities.BM25Similarity; @@ -63,15 +64,12 @@ public void testDismaxQuery() throws IOException { "generator", "foo fighers - generator", "foo fighters generator" }; final boolean omitNorms = random().nextBoolean(); + final boolean omitFreqs = random().nextBoolean(); FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); - ft.setIndexOptions(random().nextBoolean() ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); + ft.setIndexOptions(omitFreqs ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); ft.setOmitNorms(omitNorms); ft.freeze(); - FieldType ft1 = new FieldType(TextField.TYPE_NOT_STORED); - ft1.setIndexOptions(random().nextBoolean() ? 
IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS); - ft1.setOmitNorms(omitNorms); - ft1.freeze(); for (int i = 0; i < username.length; i++) { Document d = new Document(); d.add(new TextField("id", Integer.toString(i), Field.Store.YES)); @@ -83,8 +81,8 @@ public void testDismaxQuery() throws IOException { for (int j = 0; j < iters; j++) { Document d = new Document(); d.add(new TextField("id", Integer.toString(username.length + j), Field.Store.YES)); - d.add(new Field("username", "foo fighters", ft1)); - d.add(new Field("song", "some bogus text to bump up IDF", ft1)); + d.add(new Field("username", "foo fighters", ft)); + d.add(new Field("song", "some bogus text to bump up IDF", ft)); w.addDocument(d); } w.commit(); @@ -167,7 +165,7 @@ public void testExtractTerms() throws IOException { BlendedTermQuery blendedTermQuery = BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()); Set extracted = new HashSet<>(); IndexSearcher searcher = new IndexSearcher(new MultiReader()); - searcher.createNormalizedWeight(blendedTermQuery, false).extractTerms(extracted); + searcher.createWeight(searcher.rewrite(blendedTermQuery), ScoreMode.COMPLETE_NO_SCORES, 1f).extractTerms(extracted); assertThat(extracted.size(), equalTo(terms.size())); assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0]))); } diff --git a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java index 2def2702d38b3..ec468fd8d9b89 100644 --- a/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/apache/lucene/queries/InetAddressRandomBinaryDocValuesRangeQueryTests.java @@ -19,7 +19,7 @@ package org.apache.lucene.queries; import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.FutureArrays; import org.elasticsearch.index.mapper.RangeFieldMapper; import java.net.InetAddress; @@ -44,7 +44,7 @@ protected Range nextRange(int dimensions) throws Exception { byte[] bMin = InetAddressPoint.encode(min); InetAddress max = nextInetaddress(); byte[] bMax = InetAddressPoint.encode(max); - if (StringHelper.compare(bMin.length, bMin, 0, bMax, 0) > 0) { + if (FutureArrays.compareUnsigned(bMin, 0, bMin.length, bMax, 0, bMin.length) > 0) { return new IpRange(max, min); } return new IpRange(min, max); @@ -91,7 +91,7 @@ protected void setMin(int dim, Object val) { InetAddress v = (InetAddress)val; byte[] e = InetAddressPoint.encode(v); - if (StringHelper.compare(e.length, min, 0, e, 0) < 0) { + if (FutureArrays.compareUnsigned(min, 0, e.length, e, 0, e.length) < 0) { max = e; maxAddress = v; } else { @@ -111,7 +111,7 @@ protected void setMax(int dim, Object val) { InetAddress v = (InetAddress)val; byte[] e = InetAddressPoint.encode(v); - if (StringHelper.compare(e.length, max, 0, e, 0) > 0) { + if (FutureArrays.compareUnsigned(max, 0, e.length, e, 0, e.length) > 0) { min = e; minAddress = v; } else { @@ -123,22 +123,22 @@ protected void setMax(int dim, Object val) { @Override protected boolean isDisjoint(Range o) { IpRange other = (IpRange) o; - return StringHelper.compare(min.length, min, 0, other.max, 0) > 0 || - StringHelper.compare(max.length, max, 0, other.min, 0) < 0; + return FutureArrays.compareUnsigned(min, 0, min.length, other.max, 0, min.length) > 0 || + FutureArrays.compareUnsigned(max, 0, max.length, other.min, 0, 
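The weight-creation change above recurs throughout this patch: IndexSearcher.createNormalizedWeight(query, needsScores) is gone in Lucene 8, and callers now rewrite the query themselves, then pass an explicit ScoreMode and boost. A sketch of the replacement call, with a hypothetical field/value pair:

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

class CreateWeightExample {
    // needsScores=true maps to ScoreMode.COMPLETE, false to COMPLETE_NO_SCORES.
    static Weight weight(IndexSearcher searcher, boolean needsScores) throws IOException {
        Query query = searcher.rewrite(new TermQuery(new Term("field", "value")));
        ScoreMode mode = needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES;
        return searcher.createWeight(query, mode, 1f);
    }
}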
max.length) < 0; } @Override protected boolean isWithin(Range o) { IpRange other = (IpRange)o; - return StringHelper.compare(min.length, min, 0, other.min, 0) >= 0 && - StringHelper.compare(max.length, max, 0, other.max, 0) <= 0; + return FutureArrays.compareUnsigned(min, 0, min.length, other.min, 0, min.length) >= 0 && + FutureArrays.compareUnsigned(max, 0, max.length, other.max, 0, max.length) <= 0; } @Override protected boolean contains(Range o) { IpRange other = (IpRange)o; - return StringHelper.compare(min.length, min, 0, other.min, 0) <= 0 && - StringHelper.compare(max.length, max, 0, other.max, 0) >= 0; + return FutureArrays.compareUnsigned(min, 0, min.length, other.min, 0, min.length) <= 0 && + FutureArrays.compareUnsigned(max, 0, max.length, other.max, 0, max.length) >= 0; } } diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 796553034fb38..a6e676006fdbf 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -74,7 +74,7 @@ private void assertHighlightOneDoc(String fieldName, String[] inputs, Analyzer a IndexSearcher searcher = newSearcher(reader); iw.close(); TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); String rawValue = Strings.arrayToDelimitedString(inputs, String.valueOf(MULTIVAL_SEP_CHAR)); CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, analyzer, null, new CustomPassageFormatter("", "", new DefaultEncoder()), locale, diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 4c7dc9eb094b7..c0d29e86fd60b 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -208,9 +208,9 @@ public void testIsBeta() { public void testIsAlpha() { - assertTrue(new Version(5000001, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha()); - assertFalse(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isAlpha()); - assertTrue(new Version(4000002, org.apache.lucene.util.Version.LUCENE_6_0_0).isBeta()); + assertTrue(new Version(5000001, org.apache.lucene.util.Version.LUCENE_7_0_0).isAlpha()); + assertFalse(new Version(4000002, org.apache.lucene.util.Version.LUCENE_7_0_0).isAlpha()); + assertTrue(new Version(4000002, org.apache.lucene.util.Version.LUCENE_7_0_0).isBeta()); assertTrue(Version.fromString("5.0.0-alpha14").isAlpha()); assertEquals(5000014, Version.fromString("5.0.0-alpha14").id); assertTrue(Version.fromId(5000015).isAlpha()); @@ -226,7 +226,6 @@ public void testIsAlpha() { } } - public void testParseVersion() { final int iters = scaledRandomIntBetween(100, 1000); for (int i = 0; i < iters; i++) { diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index c1f729a12ca2b..59f4e2633a600 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -22,7 +22,9 @@ import org.apache.lucene.search.ScoreDoc; import 
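For the IP-range tests above, StringHelper.compare(len, a, aOfs, b, bOfs) has been replaced by FutureArrays.compareUnsigned, which takes explicit from/to bounds for both arrays and mirrors Java 9's Arrays.compareUnsigned. A small sketch of the same byte-wise ordering:

import java.net.InetAddress;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.util.FutureArrays;

class UnsignedCompareExample {
    // Orders two IP addresses by their 16-byte encoded form, as the range
    // tests above do.
    static boolean lessThan(InetAddress a, InetAddress b) {
        byte[] ea = InetAddressPoint.encode(a);
        byte[] eb = InetAddressPoint.encode(b);
        return FutureArrays.compareUnsigned(ea, 0, ea.length, eb, 0, eb.length) < 0;
    }
}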
org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -68,13 +70,17 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else if (request.id() == 2) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node2", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), + new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else { @@ -97,12 +103,12 @@ public void run() throws IOException { assertNotNull(responseRef.get()); assertNotNull(responseRef.get().get(0)); assertNull(responseRef.get().get(0).fetchResult()); - assertEquals(1, responseRef.get().get(0).queryResult().topDocs().totalHits); - assertEquals(42, responseRef.get().get(0).queryResult().topDocs().scoreDocs[0].doc); + assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); assertNotNull(responseRef.get().get(1)); assertNull(responseRef.get().get(1).fetchResult()); - assertEquals(1, responseRef.get().get(1).queryResult().topDocs().totalHits); - assertEquals(84, responseRef.get().get(1).queryResult().topDocs().scoreDocs[0].doc); + assertEquals(1, responseRef.get().get(1).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); assertEquals(2, mockSearchPhaseContext.numSuccess.get()); } @@ -126,7 +132,9 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs( + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else if (request.id() == 2) { @@ -151,8 +159,8 @@ public void run() throws IOException { assertNotNull(responseRef.get()); assertNotNull(responseRef.get().get(0)); assertNull(responseRef.get().get(0).fetchResult()); - 
assertEquals(1, responseRef.get().get(0).queryResult().topDocs().totalHits); - assertEquals(42, responseRef.get().get(0).queryResult().topDocs().scoreDocs[0].doc); + assertEquals(1, responseRef.get().get(0).queryResult().topDocs().topDocs.totalHits.value); + assertEquals(42, responseRef.get().get(0).queryResult().topDocs().topDocs.scoreDocs[0].doc); assertNull(responseRef.get().get(1)); assertEquals(1, mockSearchPhaseContext.numSuccess.get()); @@ -183,7 +191,9 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else if (request.id() == 2) { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 7f4fbc9115791..3f166446a0369 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -20,7 +20,9 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.Index; @@ -55,7 +57,8 @@ public void testShortcutQueryAndFetchOptimization() throws IOException { final int numHits; if (hasHits) { QuerySearchResult queryResult = new QuerySearchResult(); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 1.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 1.0F), new DocValueFormat[0]); queryResult.size(1); FetchSearchResult fetchResult = new FetchSearchResult(); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(42)}, 1, 1.0F)); @@ -94,13 +97,15 @@ public void testFetchTwoDocument() throws IOException { AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, 
TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); queryResult.setShardIndex(1); results.consumeResult(queryResult); @@ -149,13 +154,15 @@ public void testFailFetchOneDoc() throws IOException { AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); queryResult.setShardIndex(1); results.consumeResult(queryResult); @@ -209,7 +216,8 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException AtomicReference responseRef = new AtomicReference<>(); for (int i = 0; i < numHits; i++) { QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(i+1, i)}, i), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(i+1, i)}), i), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(i); results.consumeResult(queryResult); @@ -265,13 +273,15 @@ public void testExceptionFailsPhase() throws IOException { AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); queryResult.setShardIndex(1); results.consumeResult(queryResult); @@ -319,13 +329,15 @@ public void testCleanupIrrelevantContexts() throws IOException { // contexts tha AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = 1; 
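Because Lucene 8's TopDocs no longer carries a max score, the query-phase tests above wrap results in the new Elasticsearch TopDocsAndMaxScore holder introduced by this change; QuerySearchResult.topDocs now accepts that wrapper. A sketch of how a single-hit result is assembled in these tests:

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;

class TopDocsAndMaxScoreExample {
    // The max score is now carried alongside the TopDocs instead of inside it.
    static TopDocsAndMaxScore singleHit(int doc, float score) {
        TopDocs topDocs = new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
                new ScoreDoc[] { new ScoreDoc(doc, score) });
        return new TopDocsAndMaxScore(topDocs, score);
    }
}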
QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); - queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); + queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); queryResult.setShardIndex(1); results.consumeResult(queryResult); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 393c45fa57242..04fd258fa1596 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -22,6 +22,9 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.TotalHits.Relation; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; @@ -187,11 +190,11 @@ private AtomicArray generateQueryResults(int nShards, for (int shardIndex = 0; shardIndex < nShards; shardIndex++) { QuerySearchResult querySearchResult = new QuerySearchResult(shardIndex, new SearchShardTarget("", new Index("", ""), shardIndex, null)); - TopDocs topDocs = new TopDocs(0, new ScoreDoc[0], 0); + TopDocs topDocs = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + float maxScore = 0; if (searchHitsSize > 0) { int nDocs = randomIntBetween(0, searchHitsSize); ScoreDoc[] scoreDocs = new ScoreDoc[nDocs]; - float maxScore = 0F; for (int i = 0; i < nDocs; i++) { float score = useConstantScore ? 
1.0F : Math.abs(randomFloat()); scoreDocs[i] = new ScoreDoc(i, score); @@ -199,7 +202,7 @@ private AtomicArray generateQueryResults(int nShards, maxScore = score; } } - topDocs = new TopDocs(scoreDocs.length, scoreDocs, maxScore); + topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs); } List shardSuggestion = new ArrayList<>(); for (CompletionSuggestion completionSuggestion : suggestions) { @@ -208,19 +211,19 @@ private AtomicArray generateQueryResults(int nShards, final CompletionSuggestion.Entry completionEntry = new CompletionSuggestion.Entry(new Text(""), 0, 5); suggestion.addTerm(completionEntry); int optionSize = randomIntBetween(1, suggestion.getSize()); - float maxScore = randomIntBetween(suggestion.getSize(), (int) Float.MAX_VALUE); + float maxScoreValue = randomIntBetween(suggestion.getSize(), (int) Float.MAX_VALUE); for (int i = 0; i < optionSize; i++) { - completionEntry.addOption(new CompletionSuggestion.Entry.Option(i, new Text(""), maxScore, + completionEntry.addOption(new CompletionSuggestion.Entry.Option(i, new Text(""), maxScoreValue, Collections.emptyMap())); float dec = randomIntBetween(0, optionSize); - if (dec <= maxScore) { - maxScore -= dec; + if (dec <= maxScoreValue) { + maxScoreValue -= dec; } } suggestion.setShardIndex(shardIndex); shardSuggestion.add(suggestion); } - querySearchResult.topDocs(topDocs, null); + querySearchResult.topDocs(new TopDocsAndMaxScore(topDocs, maxScore), null); querySearchResult.size(searchHitsSize); querySearchResult.suggest(new Suggest(new ArrayList<>(shardSuggestion))); querySearchResult.setShardIndex(shardIndex); @@ -232,7 +235,9 @@ private AtomicArray generateQueryResults(int nShards, private int getTotalQueryHits(AtomicArray results) { int resultCount = 0; for (SearchPhaseResult shardResult : results.asList()) { - resultCount += shardResult.queryResult().topDocs().totalHits; + TopDocs topDocs = shardResult.queryResult().topDocs().topDocs; + assert topDocs.totalHits.relation == Relation.EQUAL_TO; + resultCount += topDocs.totalHits.value; } return resultCount; } @@ -292,7 +297,8 @@ public void testConsumer() { request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new Index("a", "b"), 0, null)); - result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 1.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); @@ -300,7 +306,8 @@ public void testConsumer() { consumer.consumeResult(result); result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0, null)); - result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0]); aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 3.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); @@ -308,7 +315,8 @@ public void testConsumer() { consumer.consumeResult(result); result = new QuerySearchResult(1, new 
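One detail worth calling out in the consumer tests above: where no max score is tracked, the wrapper is built with Float.NaN rather than 0. Reading that as a deliberate "unknown" sentinel (my inference from these hunks, not a documented contract), consumers would need Float.isNaN checks rather than equality, roughly:

import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;

class MaxScoreSentinelExample {
    static float maxScoreOrZero(TopDocsAndMaxScore result) {
        // NaN == "max score not tracked" (assumed convention); 0 is a real score.
        return Float.isNaN(result.maxScore) ? 0f : result.maxScore;
    }
}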
SearchShardTarget("node", new Index("a", "b"), 0, null)); - result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), + new DocValueFormat[0]); aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 2.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); @@ -347,7 +355,9 @@ public void testConsumerConcurrently() throws InterruptedException { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null)); - result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore( + new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number), + new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); @@ -384,7 +394,8 @@ public void testConsumerOnlyAggs() throws InterruptedException { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null)); - result.topDocs(new TopDocs(1, new ScoreDoc[0], number), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number), + new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); @@ -417,7 +428,8 @@ public void testConsumerOnlyHits() throws InterruptedException { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null)); - result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), + new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]); result.setShardIndex(id); result.size(1); consumer.consumeResult(result); @@ -477,7 +489,8 @@ public void testReduceTopNWithFromOffset() { for (int j = 0; j < docs.length; j++) { docs[j] = new ScoreDoc(0, score--); } - result.topDocs(new TopDocs(3, docs, docs[0].score), new DocValueFormat[0]); + result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(3, TotalHits.Relation.EQUAL_TO), docs), docs[0].score), + new DocValueFormat[0]); result.setShardIndex(i); result.size(5); result.from(5); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index 890f6ef163b33..b677247f266cd 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import 
org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -205,10 +206,10 @@ public void testPruneUnreferencedFiles() throws IOException { assertEquals(3, open.maxDoc()); IndexSearcher s = new IndexSearcher(open); - assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits, 1); - assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0); + assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits.value, 1); + assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits.value, 0); for (String file : dir.listAll()) { assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2")); @@ -381,7 +382,7 @@ public void testAsSequentialAccessBits() throws Exception { try (DirectoryReader reader = DirectoryReader.open(w)) { IndexSearcher searcher = newSearcher(reader); - Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, false, 1f); + Weight termWeight = new TermQuery(new Term("foo", "bar")).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1f); assertEquals(1, reader.leaves().size()); LeafReaderContext leafReaderContext = searcher.getIndexReader().leaves().get(0); Bits bits = Lucene.asSequentialAccessBits(leafReaderContext.reader().maxDoc(), termWeight.scorerSupplier(leafReaderContext)); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java index 6ebb604725d6c..d60458cf82642 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/function/MinScoreScorerTests.java @@ -103,6 +103,11 @@ public float score() throws IOException { final int idx = Arrays.binarySearch(docs, docID()); return scores[idx]; } + + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } }; } diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index 0475c324f0648..7d01b3992fcbd 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -62,7 +62,7 @@ public void testVectorHighlighter() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -88,7 +88,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 
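The MinScoreScorerTests stub above adds the one method Lucene 8 makes mandatory on every Scorer: getMaxScore(int upTo), an upper bound on the scores the scorer can produce up to a given doc id, which feeds the new block-max skipping optimizations. Returning Float.MAX_VALUE is always correct and simply opts out. A hypothetical constant-score scorer for illustration:

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hypothetical scorer, only to show the new mandatory getMaxScore hook.
class ConstantScorer extends Scorer {
    private final DocIdSetIterator it;
    private final float score;

    ConstantScorer(Weight weight, DocIdSetIterator it, float score) {
        super(weight);
        this.it = it;
        this.score = score;
    }

    @Override public int docID() { return it.docID(); }
    @Override public DocIdSetIterator iterator() { return it; }
    @Override public float score() throws IOException { return score; }

    @Override
    public float getMaxScore(int upTo) throws IOException {
        // Upper bound of score() for any doc up to and including upTo;
        // a constant scorer can report the exact bound.
        return score;
    }
}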
1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); @@ -129,7 +129,7 @@ public void testVectorHighlighterNoStore() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), @@ -150,7 +150,7 @@ public void testVectorHighlighterNoTermVector() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - assertThat(topDocs.totalHits, equalTo(1L)); + assertThat(topDocs.totalHits.value, equalTo(1L)); FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 000722863887c..75ff1ac1259d2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.index.AssertingDirectoryReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.IndexSearcher; @@ -432,13 +431,8 @@ public long computeNorm(FieldInvertState state) { } @Override - public SimWeight computeWeight(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) { - return delegate.computeWeight(boost, collectionStats, termStats); - } - - @Override - public SimScorer simScorer(SimWeight weight, LeafReaderContext context) throws IOException { - return delegate.simScorer(weight, context); + public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... 
termStats) { + return delegate.scorer(boost, collectionStats, termStats); } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index b0b4ec3930adf..bb5e0e90106ff 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -211,7 +211,7 @@ public void testRefreshActuallyWorks() throws Exception { // we are running on updateMetaData if the interval changes try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, search.totalHits); + assertEquals(1, search.totalHits.value); } }); assertFalse(refreshTask.isClosed()); @@ -224,7 +224,7 @@ public void testRefreshActuallyWorks() throws Exception { // this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(2, search.totalHits); + assertEquals(2, search.totalHits.value); } }); client().prepareIndex("test", "test", "2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); @@ -232,7 +232,7 @@ public void testRefreshActuallyWorks() throws Exception { // this one becomes visible due to the scheduled refresh try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(3, search.totalHits); + assertEquals(3, search.totalHits.value); } }); } diff --git a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java index f82f2c39f4470..ddb2b85748686 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -22,8 +22,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; -import org.apache.lucene.codecs.lucene62.Lucene62Codec; -import org.apache.lucene.codecs.lucene70.Lucene70Codec; +import org.apache.lucene.codecs.lucene80.Lucene80Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -54,8 +53,8 @@ public class CodecTests extends ESTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene70Codec.class)); - assertThat(codecService.codec("Lucene62"), instanceOf(Lucene62Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene80Codec.class)); + assertThat(codecService.codec("Lucene80"), instanceOf(Lucene80Codec.class)); } public void testDefault() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index d3aead9e44e16..be34db2b47f39 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -781,7 
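The IndexModuleTests hunk above reflects the reworked Similarity API: the two-step computeWeight(...) plus simScorer(weight, leafContext) pair collapses into a single leaf-independent scorer(boost, collectionStats, termStats) factory. A minimal delegating similarity in the new shape:

import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;

class DelegatingSimilarity extends Similarity {
    private final Similarity delegate = new BM25Similarity();

    @Override
    public long computeNorm(FieldInvertState state) {
        return delegate.computeNorm(state);
    }

    @Override
    public SimScorer scorer(float boost, CollectionStatistics collectionStats, TermStatistics... termStats) {
        // One call now covers what computeWeight + simScorer did in 7.x.
        return delegate.scorer(boost, collectionStats, termStats);
    }
}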
+781,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { recoveringEngine.recoverFromTranslog(Long.MAX_VALUE); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); - assertEquals(docs, topDocs.totalHits); + assertEquals(docs, topDocs.totalHits.value); } } finally { IOUtils.close(initialEngine, recoveringEngine, store); @@ -2706,7 +2706,7 @@ public void testSkipTranslogReplay() throws IOException { engine.skipTranslogRecovery(); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10)); - assertThat(topDocs.totalHits, equalTo(0L)); + assertThat(topDocs.totalHits.value, equalTo(0L)); } } } @@ -2782,14 +2782,14 @@ public void testTranslogReplay() throws IOException { assertThat(result.getVersion(), equalTo(2L)); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); + assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L)); } engine.close(); engine = createEngine(store, primaryTranslogDir, inSyncGlobalCheckpointSupplier); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1); - assertThat(topDocs.totalHits, equalTo(numDocs + 1L)); + assertThat(topDocs.totalHits.value, equalTo(numDocs + 1L)); } parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); assertEquals(flush ? 1 : 2, parser.appliedOperations()); @@ -2802,7 +2802,7 @@ public void testTranslogReplay() throws IOException { } try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs); - assertThat(topDocs.totalHits, equalTo((long) numDocs)); + assertThat(topDocs.totalHits.value, equalTo((long) numDocs)); } } @@ -3102,7 +3102,7 @@ public void testDoubleDeliveryPrimary() throws IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } operation = appendOnlyPrimary(doc, false, 1); retry = appendOnlyPrimary(doc, true, 1); @@ -3123,7 +3123,7 @@ public void testDoubleDeliveryPrimary() throws IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3167,7 +3167,7 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(0, topDocs.totalHits); + assertEquals(0, topDocs.totalHits.value); } } @@ -3212,7 +3212,7 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } operation = 
randomAppendOnly(doc.get(), false, 1); retry = randomAppendOnly(doc.get(), true, 1); @@ -3233,7 +3233,7 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3273,12 +3273,12 @@ public void testDoubleDeliveryReplica() throws IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } List ops = readAllOperationsInLucene(engine, createMapperService("test")); assertThat(ops.stream().map(o -> o.seqNo()).collect(Collectors.toList()), hasItem(20L)); @@ -3305,7 +3305,7 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); @@ -3314,7 +3314,7 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3339,7 +3339,7 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), result.getVersion(), null, REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); @@ -3347,7 +3347,7 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); } } @@ -3424,7 +3424,7 @@ public void testRetryConcurrently() throws InterruptedException, IOException { engine.refresh("test"); try (Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(numDocs, topDocs.totalHits); + assertEquals(numDocs, topDocs.totalHits.value); } if (primary) { // primaries rely on lucene dedup and may index the same document twice @@ -3524,7 +3524,7 @@ public void run() { engine.refresh("test"); try 
(Engine.Searcher searcher = engine.acquireSearcher("test")) { TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10); - assertEquals(docs.size(), topDocs.totalHits); + assertEquals(docs.size(), topDocs.totalHits.value); } assertEquals(0, engine.getNumVersionLookups()); assertEquals(0, engine.getNumIndexVersionsLookups()); diff --git a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java index f9641ba24d7ac..47946a6850c48 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SegmentTests.java @@ -77,7 +77,7 @@ static Segment randomSegment() { segment.sizeInBytes = randomNonNegativeLong(); segment.docCount = randomIntBetween(1, Integer.MAX_VALUE); segment.delDocCount = randomIntBetween(0, segment.docCount); - segment.version = Version.LUCENE_6_5_0; + segment.version = Version.LUCENE_7_0_0; segment.compound = randomBoolean(); segment.mergeId = randomAlphaOfLengthBetween(1, 10); segment.memoryInBytes = randomNonNegativeLong(); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java index cd1dc01d9ef4a..048455ccb41e2 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -115,7 +115,7 @@ public void testSingleValueAllSet() throws Exception { SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one())); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); @@ -126,7 +126,7 @@ public void testSingleValueAllSet() throws Exception { sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(2)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); assertThat(topDocs.scoreDocs[2].doc, equalTo(1)); @@ -192,7 +192,7 @@ public void testMultiValueAllSet() throws Exception { IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); @@ -200,7 +200,7 @@ public void testMultiValueAllSet() throws Exception { ; sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, 
equalTo(0)); assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); @@ -259,7 +259,7 @@ public void testSortMultiValuesFields() throws Exception { indexFieldData.sortField(null, MultiValueMode.MIN, null, false); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(7)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08")); @@ -281,7 +281,7 @@ public void testSortMultiValuesFields() throws Exception { sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(6)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10")); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index 04cd13766176b..ef2a9b3873580 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -265,7 +265,7 @@ public void testActualMissingValue(boolean reverse) throws IOException { IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(missingValue, MultiValueMode.MIN, null, reverse); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); - assertEquals(numDocs, topDocs.totalHits); + assertEquals(numDocs, topDocs.totalHits.value); BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value"); @@ -319,7 +319,7 @@ public void testSortMissing(boolean first, boolean reverse) throws IOException { IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(first ? "_first" : "_last", MultiValueMode.MIN, null, reverse); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); - assertEquals(numDocs, topDocs.totalHits); + assertEquals(numDocs, topDocs.totalHits.value); BytesRef previousValue = first ? null : reverse ? 
UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index b7ee74fb773a0..23e205b8f58d7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -71,25 +71,25 @@ public void testDoubleIndexingSameDoc() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(mapperService.fullName("field1").termQuery("value1", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field2").termQuery("1", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field3").termQuery("1.1", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field4").termQuery("2010-01-01", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field5").termQuery("1", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field5").termQuery("2", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); topDocs = searcher.search(mapperService.fullName("field5").termQuery("3", context), 10); - assertThat(topDocs.totalHits, equalTo(2L)); + assertThat(topDocs.totalHits.value, equalTo(2L)); writer.close(); reader.close(); dir.close(); diff --git a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 49cb4442beb8c..cdc65cce92708 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.BoostingQuery; +import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -45,7 +45,7 @@ protected void doAssertLuceneQuery(BoostingQueryBuilder queryBuilder, Query quer if (positive == null || negative == null) { assertThat(query, nullValue()); } else { - assertThat(query, instanceOf(BoostingQuery.class)); + assertThat(query, instanceOf(FunctionScoreQuery.class)); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java index 98a5d91e1b195..ef98c67e56ed4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/DisMaxQueryBuilderTests.java @@ -50,7 +50,7 @@ protected DisMaxQueryBuilder doCreateTestQueryBuilder() { 
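In the BoostingQueryBuilderTests hunk above, the expected query type changes because Lucene's BoostingQuery is gone; the builder now produces a FunctionScoreQuery. A sketch assuming FunctionScoreQuery.boostByQuery is the backing factory (which matches the boosting semantics: documents matching the negative query still match, but their score is multiplied by negativeBoost):

import org.apache.lucene.queries.function.FunctionScoreQuery;
import org.apache.lucene.search.Query;

class BoostingQueryExample {
    // Assumed replacement for the removed BoostingQuery.
    static Query boosting(Query positive, Query negative, float negativeBoost) {
        return FunctionScoreQuery.boostByQuery(positive, negative, negativeBoost);
    }
}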
dismax.add(RandomQueryBuilder.createQuery(random())); } if (randomBoolean()) { - dismax.tieBreaker(2.0f / randomIntBetween(1, 20)); + dismax.tieBreaker((float) randomDoubleBetween(0d, 1d, true)); } return dismax; } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index b0ee32548737a..1cc058eb724b8 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -154,7 +154,7 @@ protected QueryStringQueryBuilder doCreateTestQueryBuilder() { queryStringQueryBuilder.quoteFieldSuffix(randomAlphaOfLengthBetween(1, 3)); } if (randomBoolean()) { - queryStringQueryBuilder.tieBreaker(randomFloat()); + queryStringQueryBuilder.tieBreaker((float) randomDoubleBetween(0d, 1d, true)); } if (randomBoolean()) { queryStringQueryBuilder.minimumShouldMatch(randomMinimumShouldMatch()); diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java index cfc423d918ad7..698cb71692b0f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java @@ -208,7 +208,7 @@ public void testDoToQuery() throws Exception { .setMinimumShouldMatchField("m_s_m").doToQuery(context); IndexSearcher searcher = new IndexSearcher(ir); TopDocs topDocs = searcher.search(query, 10, new Sort(SortField.FIELD_DOC)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(topDocs.scoreDocs[1].doc, equalTo(3)); assertThat(topDocs.scoreDocs[2].doc, equalTo(4)); @@ -254,7 +254,7 @@ public void testDoToQuery_msmScriptField() throws Exception { .setMinimumShouldMatchScript(script).doToQuery(context); IndexSearcher searcher = new IndexSearcher(ir); TopDocs topDocs = searcher.search(query, 10, new Sort(SortField.FIELD_DOC)); - assertThat(topDocs.totalHits, equalTo(3L)); + assertThat(topDocs.totalHits.value, equalTo(3L)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); assertThat(topDocs.scoreDocs[1].doc, equalTo(2)); assertThat(topDocs.scoreDocs[2].doc, equalTo(4)); diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index 112de76b43e21..a77d10f12eafa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -320,7 +320,7 @@ public void testExplainFunctionScoreQuery() throws IOException { public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException { FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100); - Weight weight = searcher.createNormalizedWeight(functionScoreQuery, true); + Weight weight = searcher.createWeight(searcher.rewrite(functionScoreQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1f); Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0); return explanation.getDetails()[1]; } @@ -397,7 +397,7 
+397,7
@@ public Explanation getFiltersFunctionScoreExplanation(IndexSearcher searcher, Sc } protected Explanation getExplanation(IndexSearcher searcher, FunctionScoreQuery functionScoreQuery) throws IOException { - Weight weight = searcher.createNormalizedWeight(functionScoreQuery, true); + Weight weight = searcher.createWeight(searcher.rewrite(functionScoreQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1f); return weight.explain(searcher.getIndexReader().leaves().get(0), 0); } @@ -421,18 +421,19 @@ public void checkFiltersFunctionScoreExplanation(Explanation randomExplanation, assertThat(functionExplanation.getDetails()[1].getDescription(), equalTo(functionExpl)); } - private static float[] randomFloats(int size) { + private static float[] randomPositiveFloats(int size) { float[] values = new float[size]; for (int i = 0; i < values.length; i++) { - values[i] = randomFloat() * (randomBoolean() ? 1.0f : -1.0f) * randomInt(100) + 1.e-5f; + values[i] = randomFloat() * randomInt(100) + 1.e-5f; } return values; } - private static double[] randomDoubles(int size) { + private static double[] randomPositiveDoubles(int size) { double[] values = new double[size]; for (int i = 0; i < values.length; i++) { - values[i] = randomDouble() * (randomBoolean() ? 1.0d : -1.0d) * randomInt(100) + 1.e-5d; + double rand = randomValueOtherThanMany((d) -> Double.compare(d, 0) < 0, ESTestCase::randomDouble); + values[i] = rand * randomInt(100) + 1.e-5d; } return values; } @@ -478,8 +479,8 @@ protected int doHashCode() { public void testSimpleWeightedFunction() throws IOException, ExecutionException, InterruptedException { int numFunctions = randomIntBetween(1, 3); - float[] weights = randomFloats(numFunctions); - double[] scores = randomDoubles(numFunctions); + float[] weights = randomPositiveFloats(numFunctions); + double[] scores = randomPositiveDoubles(numFunctions); ScoreFunctionStub[] scoreFunctionStubs = new ScoreFunctionStub[numFunctions]; for (int i = 0; i < numFunctions; i++) { scoreFunctionStubs[i] = new ScoreFunctionStub(scores[i]); @@ -502,7 +503,7 @@ public void testSimpleWeightedFunction() throws IOException, ExecutionException, score *= weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) score, is(1f)); - float explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + float explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -518,7 +519,7 @@ public void testSimpleWeightedFunction() throws IOException, ExecutionException, sum += weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) sum, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -536,7 +537,7 @@ public void testSimpleWeightedFunction() throws IOException, ExecutionException, sum += weights[i] * scores[i]; } assertThat(scoreWithWeight / (float) (sum / norm), is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -552,7 
+553,7 @@ public void testSimpleWeightedFunction() throws IOException, ExecutionException, min = Math.min(min, weights[i] * scores[i]); } assertThat(scoreWithWeight / (float) min, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); functionScoreQueryWithWeights = getFiltersFunctionScoreQuery( @@ -568,7 +569,7 @@ public void testSimpleWeightedFunction() throws IOException, ExecutionException, max = Math.max(max, weights[i] * scores[i]); } assertThat(scoreWithWeight / (float) max, is(1f)); - explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue(); + explainedScore = getExplanation(searcher, functionScoreQueryWithWeights).getValue().floatValue(); assertThat(explainedScore / scoreWithWeight, is(1f)); } @@ -587,7 +588,7 @@ public void testMinScoreExplain() throws IOException { FunctionScoreQuery fsq = new FunctionScoreQuery(query,0f, Float.POSITIVE_INFINITY); Explanation fsqExpl = searcher.explain(fsq, 0); assertTrue(fsqExpl.isMatch()); - assertEquals(queryExpl.getValue(), fsqExpl.getValue(), 0f); + assertEquals(queryExpl.getValue(), fsqExpl.getValue()); assertEquals(queryExpl.getDescription(), fsqExpl.getDescription()); fsq = new FunctionScoreQuery(query, 10f, Float.POSITIVE_INFINITY); @@ -598,7 +599,7 @@ public void testMinScoreExplain() throws IOException { FunctionScoreQuery ffsq = new FunctionScoreQuery(query, 0f, Float.POSITIVE_INFINITY); Explanation ffsqExpl = searcher.explain(ffsq, 0); assertTrue(ffsqExpl.isMatch()); - assertEquals(queryExpl.getValue(), ffsqExpl.getValue(), 0f); + assertEquals(queryExpl.getValue(), ffsqExpl.getValue()); assertEquals(queryExpl.getDescription(), ffsqExpl.getDescription()); ffsq = new FunctionScoreQuery(query, 10f, Float.POSITIVE_INFINITY); @@ -613,8 +614,8 @@ public void testPropagatesApproximations() throws IOException { searcher.setQueryCache(null); // otherwise we could get a cached entry that does not have approximations FunctionScoreQuery fsq = new FunctionScoreQuery(query, null, Float.POSITIVE_INFINITY); - for (boolean needsScores : new boolean[] {true, false}) { - Weight weight = searcher.createWeight(fsq, needsScores, 1f); + for (org.apache.lucene.search.ScoreMode scoreMode : org.apache.lucene.search.ScoreMode.values()) { + Weight weight = searcher.createWeight(fsq, scoreMode, 1f); Scorer scorer = weight.scorer(reader.leaves().get(0)); assertNotNull(scorer.twoPhaseIterator()); } diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java b/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java index 3d0eee79595f5..02653dcfd0e4d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java +++ b/server/src/test/java/org/elasticsearch/index/query/plugin/DummyQueryParserPlugin.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -52,8 +53,8 @@ public String toString(String field) { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - return 
matchAllDocsQuery.createWeight(searcher, needsScores, boost); + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return matchAllDocsQuery.createWeight(searcher, scoreMode, boost); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index fba71dd1e5296..e471874f6d664 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -236,7 +236,7 @@ public void testConflictingOpsOnReplica() throws Exception { for (IndexShard shard : shards) { try (Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new TermQuery(new Term("f", "2")), 10); - assertEquals("shard " + shard.routingEntry() + " misses new version", 1, search.totalHits); + assertEquals("shard " + shard.routingEntry() + " misses new version", 1, search.totalHits.value); } } } diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index d4dc71388ac7d..f64a9e38b871a 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -226,7 +226,7 @@ public void testNestedSorting() throws Exception { Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7)); @@ -241,7 +241,7 @@ public void testNestedSorting() throws Exception { sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13)); @@ -263,7 +263,7 @@ public void testNestedSorting() throws Exception { ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12)); @@ -278,7 +278,7 @@ public void testNestedSorting() throws Exception { sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(15)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3)); @@ -294,7 +294,7 @@ public 
void testNestedSorting() throws Exception { nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127)); @@ -310,7 +310,7 @@ public void testNestedSorting() throws Exception { nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); - assertThat(topDocs.totalHits, equalTo(8L)); + assertThat(topDocs.totalHits.value, equalTo(8L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(19)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127)); @@ -336,7 +336,7 @@ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) th Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index c643ea6cee045..93945231e2b6f 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -69,7 +69,7 @@ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) th Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index 13d0e83e37e01..2d1ffb1e1a344 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -68,7 +68,7 @@ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, In Query query = new ToParentBlockJoinQuery(new 
ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(11)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1300debd5ebda..0bee6eeb6ed12 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -304,7 +304,7 @@ public void testNestedSorting() throws Exception { Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a")); @@ -321,7 +321,7 @@ public void testNestedSorting() throws Exception { nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(7L)); + assertThat(topDocs.totalHits.value, equalTo(7L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(28)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o")); @@ -347,7 +347,7 @@ public void testNestedSorting() throws Exception { ); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); - assertThat(topDocs.totalHits, equalTo(6L)); + assertThat(topDocs.totalHits.value, equalTo(6L)); assertThat(topDocs.scoreDocs.length, equalTo(5)); assertThat(topDocs.scoreDocs[0].doc, equalTo(23)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m")); @@ -614,7 +614,7 @@ public void testMultiLevelNestedSorting() throws IOException { sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setNestedSort(new NestedSortBuilder("chapters.paragraphs"))); QueryBuilder queryBuilder = new MatchAllQueryBuilder(); TopFieldDocs topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(5L)); + assertThat(topFields.totalHits.value, equalTo(5L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4")); @@ -630,25 +630,25 @@ public void testMultiLevelNestedSorting() throws IOException { { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); 
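The long run of totalHits -> totalHits.value edits in these sorting tests follows from Lucene 8 turning TopDocs.totalHits from a long into a TotalHits object that pairs a count with a relation, since counts may now be lower bounds. A condensed sketch of the assertion pattern the tests repeat, against a plain Lucene 8 searcher (the field name and values are illustrative, and the tests' nested comparator sources are elided):

    import java.nio.file.Files;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.FieldDoc;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopFieldDocs;
    import org.apache.lucene.search.TotalHits;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.BytesRef;

    public class SortedHitsDemo {
        public static void main(String[] args) throws Exception {
            try (Directory dir = FSDirectory.open(Files.createTempDirectory("sorted-hits"))) {
                try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                    for (String value : new String[] {"m", "a", "o"}) {
                        Document doc = new Document();
                        doc.add(new SortedDocValuesField("field2", new BytesRef(value)));
                        writer.addDocument(doc);
                    }
                }
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    IndexSearcher searcher = new IndexSearcher(reader);
                    Sort sort = new Sort(new SortField("field2", SortField.Type.STRING));
                    TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 5, sort);
                    // Lucene 8: the count lives on totalHits.value, with a relation
                    // saying whether it is exact or a lower bound
                    TotalHits totalHits = topDocs.totalHits;
                    System.out.println(totalHits.value + " hits, exact: "
                            + (totalHits.relation == TotalHits.Relation.EQUAL_TO));
                    // Each hit is a FieldDoc whose fields[] holds the sort keys
                    FieldDoc first = (FieldDoc) topDocs.scoreDocs[0];
                    System.out.println("lowest key: " + ((BytesRef) first.fields[0]).utf8ToString());
                }
            }
        }
    }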
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(234L)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); } @@ -658,7 +658,7 @@ public void testMultiLevelNestedSorting() throws IOException { sortBuilder.order(SortOrder.DESC); queryBuilder = new MatchAllQueryBuilder(); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(5L)); + assertThat(topFields.totalHits.value, equalTo(5L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("1")); @@ -675,25 +675,25 @@ public void testMultiLevelNestedSorting() throws IOException { { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(849L)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(976L)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(180L)); } @@ -708,7 
+708,7 @@ public void testMultiLevelNestedSorting() throws IOException { .setNestedSort(new NestedSortBuilder("chapters.paragraphs")) ); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("4")); @@ -716,7 +716,7 @@ public void testMultiLevelNestedSorting() throws IOException { sortBuilder.order(SortOrder.DESC); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -736,7 +736,7 @@ public void testMultiLevelNestedSorting() throws IOException { ) ); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -744,7 +744,7 @@ public void testMultiLevelNestedSorting() throws IOException { sortBuilder.order(SortOrder.DESC); topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(2L)); + assertThat(topFields.totalHits.value, equalTo(2L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); assertThat(searcher.doc(topFields.scoreDocs[1].doc).get("_id"), equalTo("2")); @@ -762,25 +762,25 @@ public void testMultiLevelNestedSorting() throws IOException { queryBuilder = new TermQueryBuilder("genre", "romance"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L)); queryBuilder = new TermQueryBuilder("genre", "science fiction"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("1")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(Long.MAX_VALUE)); queryBuilder = new TermQueryBuilder("genre", "horror"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("3")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], 
equalTo(Long.MAX_VALUE)); queryBuilder = new TermQueryBuilder("genre", "cooking"); topFields = search(queryBuilder, sortBuilder, queryShardContext, searcher); - assertThat(topFields.totalHits, equalTo(1L)); + assertThat(topFields.totalHits.value, equalTo(1L)); assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4")); assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L)); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java index 4479c7b390954..e9f52d7c3198d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexSearcherWrapperTests.java @@ -56,7 +56,7 @@ public void testReaderCloseListenerIsCalled() throws IOException { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); final AtomicInteger closeCalls = new AtomicInteger(0); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override @@ -82,7 +82,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { } outerCount.incrementAndGet(); }); - assertEquals(0, wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(0, wrap.searcher().search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); wrap.close(); assertFalse("wrapped reader is closed", wrap.reader().tryIncRef()); assertEquals(sourceRefCount, open.getRefCount()); @@ -106,7 +106,7 @@ public void testIsCacheable() throws IOException { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); final AtomicInteger closeCalls = new AtomicInteger(0); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @@ -148,7 +148,7 @@ public void testNoWrap() throws IOException { writer.addDocument(doc); DirectoryReader open = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); IndexSearcher searcher = new IndexSearcher(open); - assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits); + assertEquals(1, searcher.search(new TermQuery(new Term("field", "doc")), 1).totalHits.value); searcher.setSimilarity(iwc.getSimilarity()); IndexSearcherWrapper wrapper = new IndexSearcherWrapper(); try (Engine.Searcher engineSearcher = new Engine.Searcher("foo", searcher)) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index e8fc8a71a5b83..713bc04634b0a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1957,9 +1957,9 @@ public void testSearcherWrapperIsUsed() throws IOException { } try 
(Engine.Searcher searcher = shard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); } IndexSearcherWrapper wrapper = new IndexSearcherWrapper() { @Override @@ -1987,9 +1987,9 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { try (Engine.Searcher searcher = newShard.acquireSearcher("test")) { TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10); - assertEquals(search.totalHits, 0); + assertEquals(search.totalHits.value, 0); search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); - assertEquals(search.totalHits, 1); + assertEquals(search.totalHits.value, 1); } try (Engine.GetResult getResult = newShard .get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java index 9dcb712a05da7..9296b4f311138 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; @@ -260,9 +261,8 @@ void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId, boole try (IndexReader reader = DirectoryReader.open(dir)) { IndexSearcher searcher = new IndexSearcher(reader); searcher.setQueryCache(null); - final boolean needsScores = false; - final Weight splitWeight = searcher.createNormalizedWeight(new ShardSplittingQuery(metaData, targetShardId, hasNested), - needsScores); + final Weight splitWeight = searcher.createWeight(searcher.rewrite(new ShardSplittingQuery(metaData, targetShardId, hasNested)), + ScoreMode.COMPLETE_NO_SCORES, 1f); final List leaves = reader.leaves(); for (final LeafReaderContext ctx : leaves) { Scorer scorer = splitWeight.scorer(ctx); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java index cc1d0e827c71c..22089bc40e498 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FieldInvertState; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; @@ -45,6 +46,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; public class ScriptedSimilarityTests extends ESTestCase { @@ -65,7 +67,10 @@ private void 
doTestSameNormsAsBM25(boolean discountOverlaps) { final int length = TestUtil.nextInt(random(), 1, 100); final int position = random().nextInt(length); final int numOverlaps = random().nextInt(length); - FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo", position, length, numOverlaps, 100); + int maxTermFrequency = TestUtil.nextInt(random(), 1, 10); + int uniqueTermCount = TestUtil.nextInt(random(), 1, 10); + FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo", IndexOptions.DOCS_AND_FREQS, position, length, + numOverlaps, 100, maxTermFrequency, uniqueTermCount); assertEquals( sim2.computeNorm(state), sim1.computeNorm(state), @@ -81,7 +86,17 @@ public void testBasics() throws IOException { @Override public double execute(double weight, ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, - ScriptedSimilarity.Doc doc) throws IOException { + ScriptedSimilarity.Doc doc) { + + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && + ste.getMethodName().equals("score"); + }) == false) { + // this might happen when computing max scores + return Float.MAX_VALUE; + } + assertEquals(1, weight, 0); assertNotNull(doc); assertEquals(2f, doc.getFreq(), 0); @@ -129,7 +144,7 @@ public double execute(double weight, ScriptedSimilarity.Query query, .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) .build(), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); w.close(); @@ -143,14 +158,13 @@ public void testInitScript() throws IOException { @Override public double execute(ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, - ScriptedSimilarity.Term term) throws IOException { - assertNotNull(field); + ScriptedSimilarity.Term term) { assertEquals(3, field.getDocCount()); assertEquals(5, field.getSumDocFreq()); assertEquals(6, field.getSumTotalTermFreq()); assertNotNull(term); - assertEquals(2, term.getDocFreq()); - assertEquals(3, term.getTotalTermFreq()); + assertEquals(1, term.getDocFreq()); + assertEquals(2, term.getTotalTermFreq()); assertNotNull(query); assertEquals(3.2f, query.getBoost(), 0); initCalled.set(true); @@ -166,7 +180,17 @@ public double execute(ScriptedSimilarity.Query query, ScriptedSimilarity.Field f @Override public double execute(double weight, ScriptedSimilarity.Query query, ScriptedSimilarity.Field field, ScriptedSimilarity.Term term, - ScriptedSimilarity.Doc doc) throws IOException { + ScriptedSimilarity.Doc doc) { + + StackTraceElement[] stackTraceElements = Thread.currentThread().getStackTrace(); + if (Arrays.stream(stackTraceElements).anyMatch(ste -> { + return ste.getClassName().endsWith(".TermScorer") && + ste.getMethodName().equals("score"); + }) == false) { + // this might happen when computing max scores + return Float.MAX_VALUE; + } + assertEquals(28, weight, 0d); assertNotNull(doc); assertEquals(2f, doc.getFreq(), 0); @@ -176,8 +200,8 @@ public double execute(double weight, ScriptedSimilarity.Query query, assertEquals(5, field.getSumDocFreq()); assertEquals(6, field.getSumTotalTermFreq()); assertNotNull(term); - assertEquals(2, term.getDocFreq()); - assertEquals(3, term.getTotalTermFreq()); + assertEquals(1, term.getDocFreq()); + assertEquals(2, term.getTotalTermFreq()); 
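The FieldInvertState change above adapts to Lucene 8's wider constructor, which now also takes the field's IndexOptions, the maximum within-document term frequency, and the number of unique terms, all of which can feed norm computation. A hedged sketch of computing a norm through the new signature (the field name and statistics are arbitrary):

    import org.apache.lucene.index.FieldInvertState;
    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.search.similarities.BM25Similarity;
    import org.apache.lucene.util.Version;

    public class ComputeNormDemo {
        public static void main(String[] args) {
            int position = 9;          // last token position seen in the field
            int length = 10;           // number of indexed tokens
            int numOverlaps = 2;       // tokens that overlapped a previous position
            int offset = 100;          // last character offset
            int maxTermFrequency = 3;  // new in Lucene 8
            int uniqueTermCount = 7;   // new in Lucene 8
            FieldInvertState state = new FieldInvertState(Version.LATEST.major, "foo",
                    IndexOptions.DOCS_AND_FREQS, position, length, numOverlaps, offset,
                    maxTermFrequency, uniqueTermCount);
            // computeNorm returns the encoded per-document norm for this field
            long norm = new BM25Similarity().computeNorm(state);
            System.out.println("encoded norm: " + norm);
        }
    }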
assertNotNull(query); assertEquals(3.2f, query.getBoost(), 0); called.set(true); @@ -191,8 +215,7 @@ public double execute(double weight, ScriptedSimilarity.Query query, IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); Document doc = new Document(); - doc.add(new TextField("f", "foo bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); + doc.add(new TextField("f", "bar baz", Store.NO)); w.addDocument(doc); doc = new Document(); @@ -202,19 +225,15 @@ public double execute(double weight, ScriptedSimilarity.Query query, doc = new Document(); doc.add(new TextField("f", "bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); w.addDocument(doc); IndexReader r = DirectoryReader.open(w); w.close(); IndexSearcher searcher = new IndexSearcher(r); searcher.setSimilarity(sim); - Query query = new BoostQuery(new BooleanQuery.Builder() - .add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) - .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) - .build(), 3.2f); + Query query = new BoostQuery(new TermQuery(new Term("f", "foo")), 3.2f); TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits); + assertEquals(1, topDocs.totalHits.value); assertTrue(initCalled.get()); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index e155639f143c6..88bc4381626d4 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; @@ -72,7 +73,7 @@ public String toString(String field) { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new ConstantScoreWeight(this, boost) { @Override @@ -414,7 +415,7 @@ public void onUse(Query query) {} IndicesQueryCache cache = new IndicesQueryCache(settings); s.setQueryCache(cache); Query query = new MatchAllDocsQuery(); - final DummyWeight weight = new DummyWeight(s.createNormalizedWeight(query, false)); + final DummyWeight weight = new DummyWeight(s.createWeight(s.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f)); final Weight cached = cache.doCache(weight, s.getQueryCachingPolicy()); assertNotSame(weight, cached); assertFalse(weight.scorerCalled); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 8059c8a103927..4418a7cfb7f83 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -343,7 +343,7 @@ public BytesReference get() { try (BytesStreamOutput out = new BytesStreamOutput()) { IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(id))), 1); - assertEquals(1, topDocs.totalHits); + 
assertEquals(1, topDocs.totalHits.value); Document document = reader.document(topDocs.scoreDocs[0].doc); out.writeString(document.get("value")); loadedFromCache = false; diff --git a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 485fd92099630..119a74262bf7a 100644 --- a/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -240,6 +240,35 @@ public void testUnderscoreInAnalyzerName() throws IOException { } } + public void testStandardFilterBWC() throws IOException { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT.minimumCompatibilityVersion()); + // bwc deprecation + { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_standard.tokenizer", "standard") + .put("index.analysis.analyzer.my_standard.filter", "standard") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .build(); + IndexAnalyzers analyzers = getIndexAnalyzers(settings); + assertTokenStreamContents(analyzers.get("my_standard").tokenStream("", "test"), new String[]{"test"}); + assertWarnings("The [standard] token filter is deprecated and will be removed in a future version."); + } + // removal + { + final Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_standard.tokenizer", "standard") + .put("index.analysis.analyzer.my_standard.filter", "standard") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_7_0_0_alpha1) + .build(); + IndexAnalyzers analyzers = getIndexAnalyzers(settings); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> + analyzers.get("my_standard").tokenStream("", "")); + assertThat(exc.getMessage(), equalTo("The [standard] token filter has been removed.")); + } + } + /** * Tests that plugins can register pre-configured char filters that vary in behavior based on Elasticsearch version, Lucene version, * and that do not vary based on version at all. @@ -376,34 +405,34 @@ public void reset() throws IOException { } } AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings), - singletonList(new AnalysisPlugin() { - @Override - public List getPreConfiguredTokenizers() { - return Arrays.asList( + singletonList(new AnalysisPlugin() { + @Override + public List getPreConfiguredTokenizers() { + return Arrays.asList( PreConfiguredTokenizer.singleton("no_version", () -> new FixedTokenizer("no_version"), - noVersionSupportsMultiTerm ? () -> AppendTokenFilter.factoryForSuffix("no_version") : null), + noVersionSupportsMultiTerm ? () -> AppendTokenFilter.factoryForSuffix("no_version") : null), PreConfiguredTokenizer.luceneVersion("lucene_version", - luceneVersion -> new FixedTokenizer(luceneVersion.toString()), - luceneVersionSupportsMultiTerm ? - luceneVersion -> AppendTokenFilter.factoryForSuffix(luceneVersion.toString()) : null), + luceneVersion -> new FixedTokenizer(luceneVersion.toString()), + luceneVersionSupportsMultiTerm ? 
+ luceneVersion -> AppendTokenFilter.factoryForSuffix(luceneVersion.toString()) : null), PreConfiguredTokenizer.elasticsearchVersion("elasticsearch_version", - esVersion -> new FixedTokenizer(esVersion.toString()), - elasticsearchVersionSupportsMultiTerm ? - esVersion -> AppendTokenFilter.factoryForSuffix(esVersion.toString()) : null) - ); - } - })).getAnalysisRegistry(); + esVersion -> new FixedTokenizer(esVersion.toString()), + elasticsearchVersionSupportsMultiTerm ? + esVersion -> AppendTokenFilter.factoryForSuffix(esVersion.toString()) : null) + ); + } + })).getAnalysisRegistry(); Version version = VersionUtils.randomVersion(random()); IndexAnalyzers analyzers = getIndexAnalyzers(registry, Settings.builder() - .put("index.analysis.analyzer.no_version.tokenizer", "no_version") - .put("index.analysis.analyzer.lucene_version.tokenizer", "lucene_version") - .put("index.analysis.analyzer.elasticsearch_version.tokenizer", "elasticsearch_version") - .put(IndexMetaData.SETTING_VERSION_CREATED, version) - .build()); - assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[] {"no_version"}); - assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[] {version.luceneVersion.toString()}); - assertTokenStreamContents(analyzers.get("elasticsearch_version").tokenStream("", "test"), new String[] {version.toString()}); + .put("index.analysis.analyzer.no_version.tokenizer", "no_version") + .put("index.analysis.analyzer.lucene_version.tokenizer", "lucene_version") + .put("index.analysis.analyzer.elasticsearch_version.tokenizer", "elasticsearch_version") + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .build()); + assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[]{"no_version"}); + assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[]{version.luceneVersion.toString()}); + assertTokenStreamContents(analyzers.get("elasticsearch_version").tokenStream("", "test"), new String[]{version.toString()}); // These are current broken by https://github.com/elastic/elasticsearch/issues/24752 // assertEquals("test" + (noVersionSupportsMultiTerm ? 
"no_version" : ""), diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java index 075d5bc2aa3df..a42804692fbf3 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -112,8 +112,8 @@ public void testToXContent() throws IOException { searchHits.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals("{\"hits\":{\"total\":1000,\"max_score\":1.5," + - "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":\"-Infinity\"},"+ - "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":\"-Infinity\"}]}}", Strings.toString(builder)); + "\"hits\":[{\"_type\":\"type\",\"_id\":\"id1\",\"_score\":null},"+ + "{\"_type\":\"type\",\"_id\":\"id2\",\"_score\":null}]}}", Strings.toString(builder)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java index 9919e9dcdbbd1..6a77a89fc58f0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java @@ -62,7 +62,7 @@ private boolean needsScores(IndexService index, String agg) throws IOException { final AggregatorFactories factories = AggregatorFactories.parseAggregators(aggParser).build(context, null); final Aggregator[] aggregators = factories.createTopLevelAggregators(); assertEquals(1, aggregators.length); - return aggregators[0].needsScores(); + return aggregators[0].scoreMode().needsScores(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java index f9abdeed50f82..e3fe39db95246 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; @@ -66,6 +67,11 @@ public DocIdSetIterator iterator() { throw new UnsupportedOperationException(); } + @Override + public float getMaxScore(int upTo) throws IOException { + return Float.MAX_VALUE; + } + @Override public Weight getWeight() { throw new UnsupportedOperationException(); @@ -107,8 +113,8 @@ public void collect(int doc, long bucket) throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } @Override @@ -136,8 +142,8 @@ public void collect(int doc, long bucket) throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } @Override @@ -173,8 +179,8 @@ public void setScorer(Scorer scorer) throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } @Override diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java index 8d60dde58343f..2f99ebbf323d5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; @@ -70,8 +71,8 @@ public void testReplay() throws Exception { when(searchContext.query()).thenReturn(rewrittenQuery); BestBucketsDeferringCollector collector = new BestBucketsDeferringCollector(searchContext, false) { @Override - public boolean needsScores() { - return true; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE; } }; Set deferredCollectedDocIds = new HashSet<>(); @@ -126,8 +127,8 @@ public void postCollection() throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java index 86e937a356b46..3a740e868ee23 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollectorTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; @@ -105,8 +106,8 @@ public void postCollection() throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java index 3289c5a7f6424..0fba35358ecb0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java @@ -26,10 +26,12 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -104,12 +106,13 @@ protected InternalTopHits 
createTestInstance(String name, List inpu totalHits += internalHits.getTotalHits(); maxScore = max(maxScore, internalHits.getMaxScore()); for (int i = 0; i < internalHits.getHits().length; i++) { - ScoreDoc doc = inputs.get(input).getTopDocs().scoreDocs[i]; + ScoreDoc doc = inputs.get(input).getTopDocs().topDocs.scoreDocs[i]; if (testInstancesLookSortedByField) { doc = new FieldDoc(doc.doc, doc.score, ((FieldDoc) doc).fields, input); } else { @@ -253,7 +256,7 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { String name = instance.getName(); int from = instance.getFrom(); int size = instance.getSize(); - TopDocs topDocs = instance.getTopDocs(); + TopDocsAndMaxScore topDocs = instance.getTopDocs(); SearchHits searchHits = instance.getHits(); List pipelineAggregators = instance.pipelineAggregators(); Map metaData = instance.getMetaData(); @@ -268,7 +271,8 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { size += between(1, 100); break; case 3: - topDocs = new TopDocs(topDocs.totalHits + between(1, 100), topDocs.scoreDocs, topDocs.getMaxScore() + randomFloat()); + topDocs = new TopDocsAndMaxScore(new TopDocs(new TotalHits(topDocs.topDocs.totalHits.value + between(1, 100), + topDocs.topDocs.totalHits.relation), topDocs.topDocs.scoreDocs), topDocs.maxScore + randomFloat()); break; case 4: searchHits = new SearchHits(searchHits.getHits(), searchHits.totalHits + between(1, 100), diff --git a/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java index 92488a69d6d60..d5ceec9d7c285 100644 --- a/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchIT.java @@ -60,6 +60,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; public class TransportTwoNodesSearchIT extends ESIntegTestCase { @@ -146,16 +147,16 @@ public void testDfsQueryThenFetch() throws Exception { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getDescription(), - equalTo("docFreq")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), - equalTo(100.0f)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getDescription(), - equalTo("docCount")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), - equalTo(100.0f)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), + startsWith("n,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), + equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), + startsWith("N,")); + 
assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), + equalTo(100L)); assertThat("id[" + hit.getId() + "] -> " + hit.getExplanation().toString(), hit.getId(), equalTo(Integer.toString(100 - total - i - 1))); } total += hits.length; @@ -181,16 +182,16 @@ public void testDfsQueryThenFetchWithSort() throws Exception { SearchHit hit = hits[i]; assertThat(hit.getExplanation(), notNullValue()); assertThat(hit.getExplanation().getDetails().length, equalTo(1)); - assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails().length, equalTo(2)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getDescription(), - equalTo("docFreq")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[0].getValue(), - equalTo(100.0f)); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getDescription(), - equalTo("docCount")); - assertThat(hit.getExplanation().getDetails()[0].getDetails()[0].getDetails()[1].getValue(), - equalTo(100.0f)); + assertThat(hit.getExplanation().getDetails()[0].getDetails().length, equalTo(3)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails().length, equalTo(2)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getDescription(), + startsWith("n,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[0].getValue(), + equalTo(100L)); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getDescription(), + startsWith("N,")); + assertThat(hit.getExplanation().getDetails()[0].getDetails()[1].getDetails()[1].getValue(), + equalTo(100L)); assertThat("id[" + hit.getId() + "]", hit.getId(), equalTo(Integer.toString(total + i))); } total += hits.length; diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 6657ad9823ffe..c9679ae2ea96c 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -99,7 +99,7 @@ static class MyScript extends ScoreScript implements ExplainableSearchScript { MyScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { super(params, lookup, leafContext); } - + @Override public Explanation explain(Explanation subQueryScore) throws IOException { Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore); @@ -139,10 +139,9 @@ public void testExplainScript() throws InterruptedException, IOException, Execut int idCounter = 19; for (SearchHit hit : hits.getHits()) { assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); - assertThat(hit.getExplanation().toString(), - containsString(Double.toString(idCounter) + " = This script returned " + Double.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString("freq=1.0")); - assertThat(hit.getExplanation().toString(), containsString("termFreq=1.0")); + assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); + assertThat(hit.getExplanation().toString(), containsString("1 = n")); + assertThat(hit.getExplanation().toString(), containsString("1 = N")); assertThat(hit.getExplanation().getDetails().length, equalTo(2)); 
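The rewritten assertions in these two tests track a pair of Lucene 8 changes at once: the BM25 idf explanation now labels its inputs with "n," and "N," prefixes (document frequency and document count, reported as longs) where 7.x printed "docFreq" and "docCount" as floats, and Explanation.getValue() now returns a Number rather than a float, so callers pick the width they need. A small recursive printer for inspecting such trees; the helper itself is illustrative, not from the patch:

    import org.apache.lucene.search.Explanation;

    static void dump(Explanation explanation, String indent) {
        // getValue() is a Number in Lucene 8; call floatValue()/longValue()
        // on it when a primitive is needed.
        System.out.println(indent + explanation.getValue() + " = " + explanation.getDescription());
        for (Explanation detail : explanation.getDetails()) {
            dump(detail, indent + "  ");
        }
    }
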
idCounter--; } diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java index fc11554dfb3fe..7e96539084e74 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.ArrayList; @@ -132,8 +133,8 @@ public void testScriptScoresWithAgg() throws IOException { } public void testMinScoreFunctionScoreBasic() throws IOException { - float score = randomFloat(); - float minScore = randomFloat(); + float score = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); + float minScore = randomValueOtherThanMany((f) -> Float.compare(f, 0) < 0, ESTestCase::randomFloat); index(INDEX, TYPE, jsonBuilder().startObject() .field("num", 2) .field("random_score", score) // Pass the random score as a document field so that it can be extracted in the script @@ -167,8 +168,8 @@ public void testMinScoreFunctionScoreBasic() throws IOException { public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException { List docs = new ArrayList<>(); int numDocs = randomIntBetween(1, 100); - int scoreOffset = randomIntBetween(-2 * numDocs, 2 * numDocs); - int minScore = randomIntBetween(-2 * numDocs, 2 * numDocs); + int scoreOffset = randomIntBetween(0, 2 * numDocs); + int minScore = randomIntBetween(0, 2 * numDocs); for (int i = 0; i < numDocs; i++) { docs.add(client().prepareIndex(INDEX, TYPE, Integer.toString(i)).setSource("num", i + scoreOffset)); } diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java index 5e10292fa3e7c..fd924ce07ca93 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.RandomApproximationQuery; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Sort; @@ -218,7 +219,7 @@ public int hashCode() { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { return new Weight(this) { @Override public void extractTerms(Set terms) { @@ -267,7 +268,7 @@ public void testScorerSupplier() throws IOException { w.close(); IndexSearcher s = newSearcher(reader); s.setQueryCache(null); - Weight weight = s.createNormalizedWeight(new DummyQuery(), randomBoolean()); + Weight weight = s.createWeight(s.rewrite(new DummyQuery()), randomFrom(ScoreMode.values()), 1f); // exception when getting the scorer expectThrows(UnsupportedOperationException.class, () -> weight.scorer(s.getIndexReader().leaves().get(0))); // no exception, means scorerSupplier is delegated diff --git 
a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java index 14fe8d58132f9..00b859394c65f 100644 --- a/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java +++ b/server/src/test/java/org/elasticsearch/search/profile/query/RandomQueryGenerator.java @@ -61,8 +61,7 @@ public static QueryBuilder randomQueryBuilder(List stringFields, List {}); assertFalse(rescore); - assertEquals(searcher.count(query), context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); + assertEquals(searcher.count(query), context.queryResult().topDocs().topDocs.totalHits.value); } private void countTestCase(boolean withDeletions) throws Exception { @@ -173,15 +171,12 @@ public void testPostFilterDisablesCountOptimization() throws Exception { context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertEquals(1, context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); contextSearcher = new IndexSearcher(reader); context.parsedPostFilter(new ParsedQuery(new MatchNoDocsQuery())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertEquals(0, context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); reader.close(); dir.close(); } @@ -209,9 +204,8 @@ public void testTerminateAfterWithFilter() throws Exception { for (int i = 0; i < 10; i++) { context.parsedPostFilter(new ParsedQuery(new TermQuery(new Term("foo", Integer.toString(i))))); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertEquals(1, context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } reader.close(); dir.close(); @@ -234,14 +228,12 @@ public void testMinScoreDisablesCountOptimization() throws Exception { context.setSize(0); context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertEquals(1, context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); + assertEquals(1, context.queryResult().topDocs().topDocs.totalHits.value); contextSearcher = new IndexSearcher(reader); context.minimumScore(100); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertEquals(0, context.queryResult().topDocs().totalHits); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); + assertEquals(0, context.queryResult().topDocs().topDocs.totalHits.value); reader.close(); dir.close(); } @@ -288,28 +280,25 @@ public void testInOrderScrollOptimization() throws Exception { ScrollContext scrollContext = new ScrollContext(); scrollContext.lastEmittedDoc = null; scrollContext.maxScore = 
Float.NaN; - scrollContext.totalHits = -1; + scrollContext.totalHits = null; context.scrollContext(scrollContext); context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); int size = randomIntBetween(2, 5); context.setSize(size); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); contextSearcher = getAssertingEarlyTerminationSearcher(reader, size); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertTrue(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(size)); - assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); - assertThat(context.queryResult().topDocs().scoreDocs[0].doc, greaterThanOrEqualTo(size)); + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0].doc, greaterThanOrEqualTo(size)); reader.close(); dir.close(); } @@ -343,25 +332,22 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.setSize(1); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); context.setSize(0); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { context.setSize(1); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(1F)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); } { context.setSize(1); @@ -372,17 +358,15 @@ public void 
testTerminateAfterEarlyTermination() throws Exception { context.parsedQuery(new ParsedQuery(bq)); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); context.setSize(0); context.parsedQuery(new ParsedQuery(bq)); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); } { context.setSize(1); @@ -390,9 +374,8 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectors().put(TotalHitCountCollector.class, collector); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), greaterThan(0f)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); assertThat(collector.getTotalHits(), equalTo(1)); context.queryCollectors().clear(); } @@ -402,9 +385,8 @@ public void testTerminateAfterEarlyTermination() throws Exception { context.queryCollectors().put(TotalHitCountCollector.class, collector); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertTrue(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(1L)); - assertThat(context.queryResult().topDocs().getMaxScore(), equalTo(Float.NaN)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(0)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); assertThat(collector.getTotalHits(), equalTo(1)); } @@ -441,19 +423,19 @@ public void testIndexSortingEarlyTermination() throws Exception { final IndexReader reader = DirectoryReader.open(dir); IndexSearcher contextSearcher = new IndexSearcher(reader); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); - assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); - FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0]; + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], 
instanceOf(FieldDoc.class)); + FieldDoc fieldDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; assertThat(fieldDoc.fields[0], equalTo(1)); { context.parsedPostFilter(new ParsedQuery(new MinDocQuery(1))); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo(numDocs - 1L)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); - assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo(numDocs - 1L)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); context.parsedPostFilter(null); @@ -461,9 +443,9 @@ public void testIndexSortingEarlyTermination() throws Exception { context.queryCollectors().put(TotalHitCountCollector.class, totalHitCountCollector); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); - assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); assertThat(totalHitCountCollector.getTotalHits(), equalTo(numDocs)); context.queryCollectors().clear(); @@ -474,14 +456,14 @@ public void testIndexSortingEarlyTermination() throws Exception { context.trackTotalHits(false); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); - assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().scoreDocs.length, equalTo(1)); - assertThat(context.queryResult().topDocs().scoreDocs[0], instanceOf(FieldDoc.class)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(1)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs[0], instanceOf(FieldDoc.class)); assertThat(fieldDoc.fields[0], anyOf(equalTo(1), equalTo(2))); } reader.close(); @@ -520,27 +502,27 @@ public void testIndexSortScrollOptimization() throws Exception { ScrollContext scrollContext = new ScrollContext(); scrollContext.lastEmittedDoc = null; scrollContext.maxScore = Float.NaN; - scrollContext.totalHits = -1; + scrollContext.totalHits = null; context.scrollContext(scrollContext); context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.setSize(10); 
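Every QueryPhaseTests hunk in this stretch is mechanical fallout from two related Lucene 8 changes: TopDocs.totalHits became a TotalHits object (a count plus a relation, since hit counting may stop early), and the max score moved off TopDocs, so Elasticsearch now carries it alongside in TopDocsAndMaxScore; ScrollContext.totalHits likewise becomes a nullable TotalHits instead of a -1 sentinel long. A self-contained sketch (the class name is illustrative):

    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TotalHits;
    import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;

    public class TotalHitsDemo {
        public static void main(String[] args) {
            // Lucene 8 pairs the hit count with a relation: EQUAL_TO when the
            // count is exact, GREATER_THAN_OR_EQUAL_TO when counting stopped early.
            TopDocs topDocs = new TopDocs(new TotalHits(42, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]);
            // Elasticsearch keeps the max score next to the TopDocs it belongs to.
            TopDocsAndMaxScore result = new TopDocsAndMaxScore(topDocs, Float.NaN);
            long count = result.topDocs.totalHits.value;  // was: topDocs.totalHits
            float max = result.maxScore;                  // was: topDocs.getMaxScore()
            System.out.println(count + " hits, maxScore=" + max);
        }
    }
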
context.sort(searchSortAndFormat); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertNull(context.queryResult().terminatedEarly()); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); - int sizeMinus1 = context.queryResult().topDocs().scoreDocs.length - 1; - FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[sizeMinus1]; + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + int sizeMinus1 = context.queryResult().topDocs().topDocs.scoreDocs.length - 1; + FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[sizeMinus1]; contextSearcher = getAssertingEarlyTerminationSearcher(reader, 10); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); assertNull(context.queryResult().terminatedEarly()); - assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); assertThat(context.terminateAfter(), equalTo(0)); - assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs)); - FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0]; + assertThat(context.queryResult().getTotalHits().value, equalTo((long) numDocs)); + FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().topDocs.scoreDocs[0]; for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) { @SuppressWarnings("unchecked") FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator(1, i); diff --git a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java index 846c411881f4f..70eb0266eea38 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/DocValuesSliceQueryTests.java @@ -32,6 +32,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.UUIDs; @@ -112,8 +113,8 @@ public void collect(int doc) throws IOException { } @Override - public boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }); } diff --git a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java index 3fa4ce410529a..9ae4b9bc7daf5 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java @@ -31,7 +31,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.QueryUtils; - +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; @@ -106,8 +106,8 @@ public void collect(int doc) throws IOException { } @Override - public 
boolean needsScores() { - return false; + public ScoreMode scoreMode() { + return ScoreMode.COMPLETE_NO_SCORES; } }); } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 65c58e631ec0e..ca21cbc86ca9d 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -526,7 +526,7 @@ public void testThatSynonymsWork() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put("analysis.analyzer.suggest_analyzer_synonyms.type", "custom") .put("analysis.analyzer.suggest_analyzer_synonyms.tokenizer", "standard") - .putList("analysis.analyzer.suggest_analyzer_synonyms.filter", "standard", "lowercase", "my_synonyms") + .putList("analysis.analyzer.suggest_analyzer_synonyms.filter", "lowercase", "my_synonyms") .put("analysis.filter.my_synonyms.type", "synonym") .putList("analysis.filter.my_synonyms.synonyms", "foo,renamed"); completionMappingBuilder.searchAnalyzer("suggest_analyzer_synonyms").indexAnalyzer("suggest_analyzer_synonyms"); @@ -804,7 +804,7 @@ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exce public void testThatSuggestStopFilterWorks() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put("index.analysis.analyzer.stoptest.tokenizer", "standard") - .putList("index.analysis.analyzer.stoptest.filter", "standard", "suggest_stop_filter") + .putList("index.analysis.analyzer.stoptest.filter", "suggest_stop_filter") .put("index.analysis.filter.suggest_stop_filter.type", "stop") .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index aaeaadd4c9f83..995a2c10fe555 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -687,7 +687,7 @@ public void testShardFailures() throws IOException, InterruptedException { .put(indexSettings()) .put(IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey(), 4) .put("index.analysis.analyzer.suggest.tokenizer", "standard") - .putList("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler") + .putList("index.analysis.analyzer.suggest.filter", "lowercase", "shingler") .put("index.analysis.filter.shingler.type", "shingle") .put("index.analysis.filter.shingler.min_shingle_size", 2) .put("index.analysis.filter.shingler.max_shingle_size", 5) @@ -748,7 +748,7 @@ public void testEmptyShards() throws IOException, InterruptedException { .put(indexSettings()) .put(IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey(), 4) .put("index.analysis.analyzer.suggest.tokenizer", "standard") - .putList("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler") + .putList("index.analysis.analyzer.suggest.filter", "lowercase", "shingler") .put("index.analysis.filter.shingler.type", "shingle") .put("index.analysis.filter.shingler.min_shingle_size", 2) .put("index.analysis.filter.shingler.max_shingle_size", 5) diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 
925526323a540..ca95310cd501f 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.JaroWinklerDistance; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -76,7 +76,7 @@ public void testEqualsAndHashcode() throws IOException { public void testFromString() { assertThat(DirectCandidateGeneratorBuilder.resolveDistance("internal"), equalTo(DirectSpellChecker.INTERNAL_LEVENSHTEIN)); assertThat(DirectCandidateGeneratorBuilder.resolveDistance("damerau_levenshtein"), instanceOf(LuceneLevenshteinDistance.class)); - assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"), instanceOf(LevensteinDistance.class)); + assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"), instanceOf(LevenshteinDistance.class)); assertThat(DirectCandidateGeneratorBuilder.resolveDistance("jaro_winkler"), instanceOf(JaroWinklerDistance.class)); assertThat(DirectCandidateGeneratorBuilder.resolveDistance("ngram"), instanceOf(NGramDistance.class)); diff --git a/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java index 921a09e98e691..157adf9e55cf2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/index/analysis/MyFilterTokenFilterFactory.java @@ -20,11 +20,10 @@ import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.core.StopAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory { @@ -34,6 +33,6 @@ public MyFilterTokenFilterFactory(IndexSettings indexSettings, Environment env, @Override public TokenStream create(TokenStream tokenStream) { - return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + return new StopFilter(tokenStream, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET); } } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 5298c3995cec2..2164fe32a3945 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; import org.elasticsearch.index.analysis.ShingleTokenFilterFactory; -import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; 
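Two renames run through the hunks above: Lucene corrected the long-standing misspelling LevensteinDistance to LevenshteinDistance, and the shared English stop-word set moved from StopAnalyzer to EnglishAnalyzer. Both call sites in isolation (the helper methods are illustrative):

    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.en.EnglishAnalyzer;
    import org.apache.lucene.search.spell.LevenshteinDistance;

    static TokenStream englishStopFilter(TokenStream in) {
        // StopAnalyzer.ENGLISH_STOP_WORDS_SET no longer exists in Lucene 8.
        return new StopFilter(in, EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
    }

    static float similarity(String a, String b) {
        // Same edit-distance implementation, correctly spelled class name.
        return new LevenshteinDistance().getDistance(a, b);
    }
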
import org.elasticsearch.index.analysis.StopTokenFilterFactory; import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory; @@ -167,7 +166,6 @@ private static String toCamelCase(String s) { .put("soraninormalization", MovedToAnalysisCommon.class) .put("soranistem", MovedToAnalysisCommon.class) .put("spanishlightstem", MovedToAnalysisCommon.class) - .put("standard", StandardTokenFilterFactory.class) .put("stemmeroverride", MovedToAnalysisCommon.class) .put("stop", StopTokenFilterFactory.class) .put("swedishlightstem", MovedToAnalysisCommon.class) @@ -267,8 +265,9 @@ protected Map> getTokenizers() { */ protected Map> getPreConfiguredTokenFilters() { Map> filters = new HashMap<>(); - filters.put("standard", null); filters.put("lowercase", null); + // for old indices + filters.put("standard", Void.class); return filters; } diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 0ee5798efb30b..71d40a7b86ab6 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -393,7 +393,7 @@ public class MockSimilarityScript extends SimilarityScript { } @Override - public double execute(double weight, Query query, Field field, Term term, Doc doc) throws IOException { + public double execute(double weight, Query query, Field field, Term term, Doc doc) { Map map = new HashMap<>(); map.put("weight", weight); map.put("query", query); @@ -413,7 +413,7 @@ public class MockSimilarityWeightScript extends SimilarityWeightScript { } @Override - public double execute(Query query, Field field, Term term) throws IOException { + public double execute(Query query, Field field, Term term) { Map map = new HashMap<>(); map.put("query", query); map.put("field", field); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 22c5772ff2d53..17202839a65fd 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -367,7 +368,7 @@ protected A searchAndReduc List aggs = new ArrayList<> (); Query rewritten = searcher.rewrite(query); - Weight weight = searcher.createWeight(rewritten, true, 1f); + Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f); MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket); C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index fc2a85b35a95b..27bcb5868c548 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -143,7 +143,7 @@ public AssertingIndexSearcher 
newSearcher(Engine.Searcher searcher) throws Engin } // this executes basic query checks and asserts that weights are normalized only once etc. final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); - assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity(true)); + assertingIndexSearcher.setSimilarity(searcher.searcher().getSimilarity()); assertingIndexSearcher.setQueryCache(filterCache); assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; @@ -185,7 +185,7 @@ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrap public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher) { final AssertingIndexSearcher assertingIndexSearcher = newSearcher(engineSearcher); - assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity(true)); + assertingIndexSearcher.setSimilarity(engineSearcher.searcher().getSimilarity()); // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json index 38937a9b5af93..e69c2db6ff400 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.json @@ -42,7 +42,7 @@ }, "czechAnalyzerWithStemmer":{ "tokenizer":"standard", - "filter":["standard", "lowercase", "stop", "czech_stem"] + "filter":["lowercase", "stop", "czech_stem"] }, "decompoundingAnalyzer":{ "tokenizer":"standard", diff --git a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml index f7a57d14dbe3d..82f933296a314 100644 --- a/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml +++ b/test/framework/src/main/resources/org/elasticsearch/analysis/common/test1.yml @@ -33,7 +33,7 @@ index : version: 3.6 czechAnalyzerWithStemmer : tokenizer : standard - filter : [standard, lowercase, stop, czech_stem] + filter : [lowercase, stop, czech_stem] decompoundingAnalyzer : tokenizer : standard filter : [dict_dec] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 223b7f00807fe..6d3864aa3eba2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -375,6 +375,7 @@ private Terms wrapTerms(Terms terms, String field) throws IOException { class FieldNamesTerms extends FilterTerms { final long size; final long sumDocFreq; + final long sumTotalFreq; FieldNamesTerms(Terms in) throws IOException { super(in); @@ -382,13 +383,15 @@ class FieldNamesTerms extends FilterTerms { // re-compute the stats for the field to take // into account the filtered terms. 
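The FieldSubsetReader change that follows exists because Lucene 8 consumers expect getSumTotalTermFreq() to be consistent with the terms actually exposed, so the filtered view must accumulate totalTermFreq() during the same pass that already recomputes size and sumDocFreq. The loop, extracted as a standalone sketch (method name illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    // One pass over the filtered TermsEnum recomputes every aggregate
    // statistic a Terms implementation must report.
    static long[] recomputeStats(Terms terms) throws IOException {
        long size = 0, sumDocFreq = 0, sumTotalTermFreq = 0;
        TermsEnum e = terms.iterator();
        while (e.next() != null) {
            size++;
            sumDocFreq += e.docFreq();
            sumTotalTermFreq += e.totalTermFreq(); // newly tracked in this patch
        }
        return new long[] { size, sumDocFreq, sumTotalTermFreq };
    }
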
final TermsEnum e = iterator(); - long size = 0, sumDocFreq = 0; + long size = 0, sumDocFreq = 0, sumTotalFreq = 0; while (e.next() != null) { size ++; sumDocFreq += e.docFreq(); + sumTotalFreq += e.totalTermFreq(); } this.size = size; this.sumDocFreq = sumDocFreq; + this.sumTotalFreq = sumTotalFreq; } @Override @@ -406,6 +409,11 @@ public long getSumDocFreq() throws IOException { return sumDocFreq; } + @Override + public long getSumTotalTermFreq() throws IOException { + return sumTotalFreq; + } + @Override public int getDocCount() throws IOException { // it is costly to recompute this value so we assume that docCount == maxDoc. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java index 9426b64364783..60b598a3a99c8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java @@ -183,7 +183,7 @@ protected IndexSearcher wrap(IndexSearcher searcher) throws EngineException { IndexSearcher indexSearcher = new IndexSearcherWrapper((DocumentSubsetDirectoryReader) directoryReader); indexSearcher.setQueryCache(indexSearcher.getQueryCache()); indexSearcher.setQueryCachingPolicy(indexSearcher.getQueryCachingPolicy()); - indexSearcher.setSimilarity(indexSearcher.getSimilarity(true)); + indexSearcher.setSimilarity(indexSearcher.getSimilarity()); return indexSearcher; } return searcher; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java index dca2f37f3f224..bd6ac12ee3c1b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReaderTests.java @@ -108,14 +108,14 @@ public void testSearch() throws Exception { new TermQuery(new Term("field", "value1")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(0)); indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value2")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(1)); // this doc has been marked as deleted: @@ -123,13 +123,13 @@ public void testSearch() throws Exception { new TermQuery(new Term("field", "value3")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(0L)); + assertThat(result.totalHits.value, equalTo(0L)); indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new 
TermQuery(new Term("field", "value4")))); assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1)); result = indexSearcher.search(new MatchAllDocsQuery(), 1); - assertThat(result.totalHits, equalTo(1L)); + assertThat(result.totalHits.value, equalTo(1L)); assertThat(result.scoreDocs[0].doc, equalTo(3)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java index dccbd14c04704..e364b0a7e8a66 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java @@ -232,7 +232,7 @@ public void onRemoval(ShardId shardId, Accountable accountable) { new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext, licenseState, scriptService); IndexSearcher result = securityIndexSearcherWrapper.wrap(indexSearcher); assertThat(result, not(sameInstance(indexSearcher))); - assertThat(result.getSimilarity(true), sameInstance(indexSearcher.getSimilarity(true))); + assertThat(result.getSimilarity(), sameInstance(indexSearcher.getSimilarity())); bitsetFilterCache.close(); } @@ -270,7 +270,8 @@ public void testIntersectScorerAndRoleBits() throws Exception { iw.close(); DirectoryReader directoryReader = DirectoryReader.open(directory); IndexSearcher searcher = new IndexSearcher(directoryReader); - Weight weight = searcher.createNormalizedWeight(new TermQuery(new Term("field2", "value1")), false); + Weight weight = searcher.createWeight(new TermQuery(new Term("field2", "value1")), + org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); LeafReaderContext leaf = directoryReader.leaves().get(0); @@ -545,8 +546,8 @@ public Query rewrite(IndexReader reader) throws IOException { } @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException { - return new CreateScorerOnceWeight(query.createWeight(searcher, needsScores, boost)); + public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException { + return new CreateScorerOnceWeight(query.createWeight(searcher, scoreMode, boost)); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java index 1d6d524cbbb70..efe154f8d780c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/accesscontrol/OptOutQueryCacheTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Weight; @@ -65,7 +66,7 @@ public void testOptOutQueryCacheSafetyCheck() throws IOException { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.add(new TermQuery(new Term("foo", 
"bar")), BooleanClause.Occur.MUST); builder.add(new TermQuery(new Term("no", "baz")), BooleanClause.Occur.MUST_NOT); - Weight weight = builder.build().createWeight(searcher, false, 1f); + Weight weight = builder.build().createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, 1f); // whenever the allowed fields match the fields in the query and we do not deny access to any fields we allow caching. IndicesAccessControl.IndexAccessControl permissions = new IndicesAccessControl.IndexAccessControl(true, diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 deleted file mode 100644 index fdedaf3fc5756..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-7.5.0-snapshot-13b9e28f9d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73dd7703a94ec2357581f65ee7c1c4d618ff310f \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 new file mode 100644 index 0000000000000..683b585bb2f61 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-4d78db26be.jar.sha1 @@ -0,0 +1 @@ +126faacb28d1b8cc1ab81d702973d057892120d1 \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java index 9570eaf1b6a06..0f00822e3f445 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/StringUtils.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.sql.util; -import org.apache.lucene.search.spell.LevensteinDistance; +import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -248,7 +248,7 @@ public static String toString(SearchSourceBuilder source) { } public static List findSimilar(String match, Iterable potentialMatches) { - LevensteinDistance ld = new LevensteinDistance(); + LevenshteinDistance ld = new LevenshteinDistance(); List> scoredMatches = new ArrayList<>(); for (String potentialMatch : potentialMatches) { float distance = ld.getDistance(match, potentialMatch);