diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index c3c484ae59b1f..e792cfa3378ff 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0
-lucene = 8.0.0-snapshot-7d0a7782fa
+lucene = 8.0.0-snapshot-31d7dfe6b1
 
 # optional dependencies
 spatial4j = 0.7
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
index aaca4f9b1860f..75e0087831a62 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
@@ -44,7 +44,6 @@
 import org.apache.lucene.analysis.core.DecimalDigitFilter;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.LetterTokenizer;
-import org.apache.lucene.analysis.core.LowerCaseTokenizer;
 import org.apache.lucene.analysis.core.UpperCaseFilter;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.cz.CzechAnalyzer;
@@ -308,7 +307,8 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
         tokenizers.put("char_group", CharGroupTokenizerFactory::new);
         tokenizers.put("classic", ClassicTokenizerFactory::new);
         tokenizers.put("letter", LetterTokenizerFactory::new);
-        tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
+        // TODO deprecate and remove in API
+        tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new);
         tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
         tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new);
         tokenizers.put("pattern", PatternTokenizerFactory::new);
@@ -503,7 +503,8 @@ public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
             () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
         tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
         tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
-        tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() {
+        // TODO deprecate and remove in API
+        tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() {
             @Override
             public String name() {
                 return "lowercase";
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java
new file mode 100644
index 0000000000000..3f11c52858aa4
--- /dev/null
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.apache.lucene.analysis.CharacterUtils;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.util.CharTokenizer;
+
+import java.io.IOException;
+
+@Deprecated
+class XLowerCaseTokenizer extends Tokenizer {
+
+    private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0;
+
+    private static final int IO_BUFFER_SIZE = 4096;
+
+    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+
+    private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE);
+
+    @Override
+    public final boolean incrementToken() throws IOException {
+        clearAttributes();
+        int length = 0;
+        int start = -1; // this variable is always initialized
+        int end = -1;
+        char[] buffer = termAtt.buffer();
+        while (true) {
+            if (bufferIndex >= dataLen) {
+                offset += dataLen;
+                CharacterUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils
+                if (ioBuffer.getLength() == 0) {
+                    dataLen = 0; // so next offset += dataLen won't decrement offset
+                    if (length > 0) {
+                        break;
+                    } else {
+                        finalOffset = correctOffset(offset);
+                        return false;
+                    }
+                }
+                dataLen = ioBuffer.getLength();
+                bufferIndex = 0;
+            }
+            // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone
+            final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength());
+            final int charCount = Character.charCount(c);
+            bufferIndex += charCount;
+
+            if (Character.isLetter(c)) {               // if it's a token char
+                if (length == 0) {                     // start of token
+                    assert start == -1;
+                    start = offset + bufferIndex - charCount;
+                    end = start;
+                } else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds
+                    buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer
+                }
+                end += charCount;
+                length += Character.toChars(Character.toLowerCase(c), buffer, length); // buffer it, normalized
+                int maxTokenLen = CharTokenizer.DEFAULT_MAX_WORD_LEN;
+                if (length >= maxTokenLen) { // buffer overflow! make sure to check for >= surrogate pair could break == test
+                    break;
+                }
+            } else if (length > 0) {           // at non-Letter w/ chars
+                break;                         // return 'em
+            }
+        }
+
+        termAtt.setLength(length);
+        assert start != -1;
+        offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end));
+        return true;
+
+    }
+
+    @Override
+    public final void end() throws IOException {
+        super.end();
+        // set final offset
+        offsetAtt.setOffset(finalOffset, finalOffset);
+    }
+
+    @Override
+    public void reset() throws IOException {
+        super.reset();
+        bufferIndex = 0;
+        offset = 0;
+        dataLen = 0;
+        finalOffset = 0;
+        ioBuffer.reset(); // make sure to reset the IO buffer!!
+ } + +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java similarity index 71% rename from modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java index 8c913a33cfe4c..4cd5b07fe484a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java @@ -20,26 +20,21 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; -import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent { +@Deprecated +// NORELEASE we should prevent the usage on indices created after 7.0 in order to be able to remove in 8 +public class XLowerCaseTokenizerFactory extends AbstractTokenizerFactory { - LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + public XLowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, settings); } @Override public Tokenizer create() { - return new LowerCaseTokenizer(); - } - - @Override - public Object getMultiTermComponent() { - return this; + return new XLowerCaseTokenizer(); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index f9fca66cc54a1..99e882c622085 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -48,7 +48,7 @@ protected Map> getTokenizers() { tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class); tokenizers.put("classic", ClassicTokenizerFactory.class); tokenizers.put("letter", LetterTokenizerFactory.class); - tokenizers.put("lowercase", LowerCaseTokenizerFactory.class); + // tokenizers.put("lowercase", XLowerCaseTokenizerFactory.class); tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class); tokenizers.put("pattern", PatternTokenizerFactory.class); tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class); @@ -223,7 +223,7 @@ protected Map> getPreConfiguredTokenFilters() { protected Map> getPreConfiguredTokenizers() { Map> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers()); tokenizers.put("keyword", null); - tokenizers.put("lowercase", null); + tokenizers.put("lowercase", Void.class); tokenizers.put("classic", null); tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class); tokenizers.put("path_hierarchy", null); diff --git 
diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..b536c887eab0c
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+8db13c6e146c851614c9f862f1eac67431f9b509
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 4904c89e62f89..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-cc072b68aac06a2fb9569ab7adce05302f130948
\ No newline at end of file
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
index d95c9899c89ad..67e0fad53ec49 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java
@@ -75,7 +75,7 @@ public void testDefaults() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointDataDimensionCount());
         assertFalse(pointField.fieldType().stored());
         assertEquals(1230, pointField.numericValue().longValue());
         IndexableField dvField = fields[1];
@@ -149,7 +149,7 @@ public void testNoDocValues() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(1, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointDataDimensionCount());
         assertEquals(1230, pointField.numericValue().longValue());
     }
 
@@ -173,7 +173,7 @@ public void testStore() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(3, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointDataDimensionCount());
         assertEquals(1230, pointField.numericValue().doubleValue(), 0d);
         IndexableField dvField = fields[1];
         assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
@@ -202,7 +202,7 @@ public void testCoerce() throws Exception {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointDataDimensionCount());
         assertEquals(1230, pointField.numericValue().longValue());
         IndexableField dvField = fields[1];
         assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
@@ -317,7 +317,7 @@ public void testNullValue() throws IOException {
         IndexableField[] fields = doc.rootDoc().getFields("field");
         assertEquals(2, fields.length);
         IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
+        assertEquals(1, pointField.fieldType().pointDataDimensionCount());
         assertFalse(pointField.fieldType().stored());
         assertEquals(25, pointField.numericValue().longValue());
         IndexableField dvField = fields[1];
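Context for these assertion renames (and the matching ones in the server tests further down): the new Lucene snapshot splits a point field's single dimension count into data dimensions and index dimensions, so `pointDimensionCount()` is replaced by `pointDataDimensionCount()` and `pointIndexDimensionCount()`. For ordinary numeric fields the two counts are equal; they only diverge when a field indexes fewer dimensions than it stores per value. A minimal sketch, assuming the Lucene `FieldType` API of this snapshot:

```java
import org.apache.lucene.document.FieldType;

public class PointDimensionCountsSketch {
    public static void main(String[] args) {
        FieldType ft = new FieldType();
        ft.setDimensions(1, 8); // one dimension, 8 bytes per dimension (e.g. a long point)
        ft.freeze();

        // Both counts are 1 here; the old pointDimensionCount() accessor is gone.
        System.out.println(ft.pointDataDimensionCount());
        System.out.println(ft.pointIndexDimensionCount());
    }
}
```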
diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
index 4b46537bb1650..581c6fd494286 100644
--- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
+++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java
@@ -327,7 +327,7 @@ Tuple<List<BytesRef>, Map<String, List<byte[]>>> extractTermsAndRanges(IndexRead
                 extractedTerms.add(builder.toBytesRef());
             }
         }
-        if (info.getPointDimensionCount() == 1) { // not != 0 because range fields are not supported
+        if (info.getPointIndexDimensionCount() == 1) { // not != 0 because range fields are not supported
             PointValues values = reader.getPointValues(info.name);
             List<byte[]> encodedPointValues = new ArrayList<>();
             encodedPointValues.add(values.getMinPackedValue().clone());
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
index 3d9a8fb8ebb08..07f47df41e60d 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
@@ -38,7 +38,7 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
-import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.MultiTerms;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.PostingsEnum;
@@ -1090,7 +1090,7 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd
                 String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string");
                 logger.error("controlTopDocs.scoreDocs[{}].query_to_string={}", i, queryToString);
 
-                TermsEnum tenum = MultiFields.getFields(shardSearcher.getIndexReader()).terms(fieldType.queryTermsField.name()).iterator();
+                TermsEnum tenum = MultiTerms.getTerms(shardSearcher.getIndexReader(), fieldType.queryTermsField.name()).iterator();
                 StringBuilder builder = new StringBuilder();
                 for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
                     PostingsEnum penum = tenum.postings(null);
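The `MultiFields.getFields(reader).terms(field)` to `MultiTerms.getTerms(reader, field)` rewrite above repeats mechanically in TermVectorsService, PhraseSuggester and WordScorer further down. A small sketch of the migration (the method and field name here are illustrative, not from the diff):

```java
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

public class MultiTermsMigrationSketch {
    // Lucene 7.x style, no longer compiles against the new snapshot:
    //   TermsEnum te = MultiFields.getFields(reader).terms(field).iterator();
    static TermsEnum termsEnum(IndexReader reader, String field) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, field); // merged view across all leaves
        return terms == null ? null : terms.iterator();   // null when no document has the field
    }
}
```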
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..65e5ca3382240
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+b474e1a2d7f0172338a08f159849a6c491781d70
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index abc772945b1b4..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-429eb7e780c5a6e5200041a1f5b98bccd2623aaf
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..51fb0eebff73c
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+fc547e69837bcb808f1782bfa35490645bab9cae
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index e103c8c0c7c41..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-837fca1b1d7ca1dc002e53171801526644e52818
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..3389dc2f73ea1
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+e08961a2ec9414947693659ff79bb7e21a410298
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index b7a23ee518fcb..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1dde903172ade259cb26cbe320c25bc1d1356f89
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..b0854f657867a
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+09280919225656c7ce2a14af29666a02bd86c540
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 08b07e7c2f498..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b6ca20e96a989e6e6706b8b7b8ad8c82d2a03576
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..00860c9fc832e
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+880f10393cdefff7575fbf5b2ced890666ec81dc
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 3f6fed19af1aa..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c96a2f25dea18b383423a41aca296734353d4bbd
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..4818fd1665f27
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+b41451a9d4e30b8a9a14ccdd7553e5796f77cf44
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 5dc03672c8753..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-09363c5ce111d024a6da22a5ea8dbaf54d91dbd0
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..1b4f444999f58
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+145fd2c803d682c2cb2d78e6e350e09a09a09ea0
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index e940b50d640e1..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-13c3840d49480014118de99ef6e07a9e55c50172
\ No newline at end of file
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
index c9448dd88e756..79fefbc64d407 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
@@ -20,12 +20,9 @@
 package org.elasticsearch.index.mapper.annotatedtext;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
 import org.apache.lucene.analysis.AnalyzerWrapper;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -69,20 +66,21 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
 
 /** A {@link FieldMapper} for full-text fields with annotation markup e.g.
- * + * * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + * * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points * in the token stream. - * This code is largely a copy of TextFieldMapper which is less than ideal - + * This code is largely a copy of TextFieldMapper which is less than ideal - * my attempts to subclass TextFieldMapper failed but we can revisit this. **/ public class AnnotatedTextFieldMapper extends FieldMapper { @@ -100,7 +98,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER; - + public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; @@ -118,7 +116,7 @@ public Builder positionIncrementGap(int positionIncrementGap) { this.positionIncrementGap = positionIncrementGap; return this; } - + @Override public Builder docValues(boolean docValues) { if (docValues) { @@ -141,8 +139,8 @@ public AnnotatedTextFieldMapper build(BuilderContext context) { fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(), positionIncrementGap)); fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap)); } else { - //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory - // does to splice in new default of posIncGap=100 by wrapping the analyzer + //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory + // does to splice in new default of posIncGap=100 by wrapping the analyzer if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { int overrideInc = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(), overrideInc)); @@ -162,7 +160,7 @@ public static class TypeParser implements Mapper.TypeParser { public Mapper.Builder parse( String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { AnnotatedTextFieldMapper.Builder builder = new AnnotatedTextFieldMapper.Builder(fieldName); - + builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer()); builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer()); @@ -181,7 +179,7 @@ public Mapper.Builder annotations; - + // Format is markdown-like syntax for URLs eg: // "New mayor is [John Smith](type=person&value=John%20Smith) " - static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); - + static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); + public static AnnotatedText parse (String textPlusMarkup) { List annotations =new ArrayList<>(); - Matcher m = markdownPattern.matcher(textPlusMarkup); + Matcher m = markdownPattern.matcher(textPlusMarkup); int lastPos = 0; StringBuilder sb = new StringBuilder(); while(m.find()){ if(m.start() > lastPos){ sb.append(textPlusMarkup.substring(lastPos, m.start())); } - + int startOffset = sb.length(); int endOffset = sb.length() + m.group(1).length(); sb.append(m.group(1)); lastPos = m.end(); - + String[] pairs = m.group(2).split("&"); String value = null; for 
             for (String pair : pairs) {
                 String[] kv = pair.split("=");
                 try {
-                    if(kv.length == 2){ 
+                    if(kv.length == 2){
                         throw new ElasticsearchParseException("key=value pairs are not supported in annotations");
                     }
                     if(kv.length == 1) {
@@ -230,9 +228,9 @@ public static AnnotatedText parse (String textPlusMarkup) {
                 } catch (UnsupportedEncodingException uee){
                     throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", uee);
-                } 
-            } 
-        } 
+                }
+            }
+        }
         if(lastPos < textPlusMarkup.length()){
             sb.append(textPlusMarkup.substring(lastPos));
         }
@@ -242,13 +240,13 @@ public static AnnotatedText parse (String textPlusMarkup) {
     protected AnnotatedText(String textMinusMarkup, String textPlusMarkup, List<AnnotationToken> annotations) {
         this.textMinusMarkup = textMinusMarkup;
         this.textPlusMarkup = textPlusMarkup;
-        this.annotations = annotations; 
+        this.annotations = annotations;
     }
-    
+
     public static final class AnnotationToken {
         public final int offset;
         public final int endOffset;
-        
+
         public final String value;
         public AnnotationToken(int offset, int endOffset, String value) {
             this.offset = offset;
@@ -259,12 +257,12 @@ public AnnotationToken(int offset, int endOffset, String value) {
         public String toString() {
            return value +" ("+offset+" - "+endOffset+")";
         }
-        
+
         public boolean intersects(int start, int end) {
             return (start <= offset && end >= offset) || (start <= endOffset && end >= endOffset)
                     || (start >= offset && end <= endOffset);
         }
-        
+
         @Override
         public int hashCode() {
             final int prime = 31;
@@ -274,7 +272,7 @@ public int hashCode() {
             result = prime * result + Objects.hashCode(value);
             return result;
         }
-        
+
         @Override
         public boolean equals(Object obj) {
             if (this == obj)
@@ -287,16 +285,16 @@ public boolean equals(Object obj) {
             return Objects.equals(endOffset, other.endOffset) && Objects.equals(offset, other.offset)
                     && Objects.equals(value, other.value);
         }
-        
+
     }
-    
+
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append(textMinusMarkup);
         sb.append("\n");
         annotations.forEach(a -> {
-            sb.append(a); 
+            sb.append(a);
             sb.append("\n");
         });
         return sb.toString();
@@ -308,10 +306,10 @@ public int numAnnotations() {
 
     public AnnotationToken getAnnotation(int index) {
         return annotations.get(index);
-    } 
+    }
 }
-
-// A utility class for use with highlighters where the content being highlighted 
+
+// A utility class for use with highlighters where the content being highlighted
 // needs plain text format for highlighting but marked-up format for token discovery.
 // The class takes markedup format field values and returns plain text versions.
 // When asked to tokenize plain-text versions by the highlighter it tokenizes the
@@ -330,7 +328,7 @@ public void init(String[] markedUpFieldValues) {
                 annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]);
             }
         }
-        
+
         public String [] getPlainTextValuesForHighlighter(){
             String [] result = new String[annotations.length];
             for (int i = 0; i < annotations.length; i++) {
@@ -338,127 +336,75 @@ public void init(String[] markedUpFieldValues) {
             }
             return result;
         }
-        
+
         public AnnotationToken[] getIntersectingAnnotations(int start, int end) {
             List<AnnotationToken> intersectingAnnotations = new ArrayList<>();
             int fieldValueOffset =0;
             for (AnnotatedText fieldValueAnnotations : this.annotations) {
                 //This is called from a highlighter where all of the field values are concatenated
-                // so each annotation offset will need to be adjusted so that it takes into account 
+                // so each annotation offset will need to be adjusted so that it takes into account
                 // the previous values AND the MULTIVAL delimiter
                 for (AnnotationToken token : fieldValueAnnotations.annotations) {
                     if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) {
-                        intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, 
+                        intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset,
                                 token.endOffset + fieldValueOffset, token.value));
                     }
-                } 
+                }
                 //add 1 for the fieldvalue separator character
                 fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1;
             }
             return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]);
-        } 
-        
+        }
+
         @Override
         public Analyzer getWrappedAnalyzer(String fieldName) {
             return delegate;
-        } 
-        
+        }
+
         @Override
         protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
-            if(components instanceof AnnotatedHighlighterTokenStreamComponents){
-                // already wrapped.
-                return components;
-            }
             AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream());
-            return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, this.annotations);
-        }
-    }
-    private static final class AnnotatedHighlighterTokenStreamComponents extends TokenStreamComponents{
-
-        private AnnotationsInjector annotationsInjector;
-        private AnnotatedText[] annotations;
-        int readerNum = 0;
-
-        AnnotatedHighlighterTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsFilter,
-                AnnotatedText[] annotations) {
-            super(source, annotationsFilter);
-            this.annotationsInjector = annotationsFilter;
-            this.annotations = annotations;
+            AtomicInteger readerNum = new AtomicInteger(0);
+            return new TokenStreamComponents(r -> {
+                String plainText = readToString(r);
+                AnnotatedText at = this.annotations[readerNum.getAndIncrement()];
+                assert at.textMinusMarkup.equals(plainText);
+                injector.setAnnotations(at);
+                components.getSource().accept(new StringReader(at.textMinusMarkup));
+            }, injector);
         }
+    }
 
-        @Override
-        protected void setReader(Reader reader) {
-            String plainText = readToString(reader);
-            AnnotatedText at = this.annotations[readerNum++];
-            assert at.textMinusMarkup.equals(plainText);
-            // This code is reliant on the behaviour of highlighter logic - it
-            // takes plain text multi-value fields and then calls the same analyzer
-            // for each field value in turn. This class has cached the annotations
-            // associated with each plain-text value and are arranged in the same order
-            annotationsInjector.setAnnotations(at);
-            super.setReader(new StringReader(at.textMinusMarkup));
-        }
-
-    }
-
     public static final class AnnotationAnalyzerWrapper extends AnalyzerWrapper {
 
         private final Analyzer delegate;
 
-        public AnnotationAnalyzerWrapper (Analyzer delegate) {
+        public AnnotationAnalyzerWrapper(Analyzer delegate) {
             super(delegate.getReuseStrategy());
             this.delegate = delegate;
         }
 
-        /**
-         * Wraps {@link StandardAnalyzer}.
-         */
-        public AnnotationAnalyzerWrapper() {
-            this(new StandardAnalyzer());
-        }
-
-        @Override
         public Analyzer getWrappedAnalyzer(String fieldName) {
             return delegate;
-        } 
+        }
 
         @Override
         protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
-            if(components instanceof AnnotatedTokenStreamComponents){
-                // already wrapped.
+            if (components.getTokenStream() instanceof AnnotationsInjector) {
+                // already wrapped
                 return components;
             }
             AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream());
-            return new AnnotatedTokenStreamComponents(components.getTokenizer(), injector);
-        }
-    }
-
-
-    //This Analyzer is not "wrappable" because of a limitation in Lucene https://issues.apache.org/jira/browse/LUCENE-8352
-    private static final class AnnotatedTokenStreamComponents extends TokenStreamComponents{
-        private AnnotationsInjector annotationsInjector;
-
-        AnnotatedTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsInjector) {
-            super(source, annotationsInjector);
-            this.annotationsInjector = annotationsInjector;
-        }
-
-        @Override
-        protected void setReader(Reader reader) {
-            // Sneaky code to change the content downstream components will parse.
-            // Replace the marked-up content Reader with a plain text Reader and prime the
-            // annotations injector with the AnnotatedTokens that need to be injected
-            // as plain-text parsing progresses.
-            AnnotatedText annotations = AnnotatedText.parse(readToString(reader));
-            annotationsInjector.setAnnotations(annotations);
-            super.setReader(new StringReader(annotations.textMinusMarkup));
+            return new TokenStreamComponents(r -> {
+                AnnotatedText annotations = AnnotatedText.parse(readToString(r));
+                injector.setAnnotations(annotations);
+                components.getSource().accept(new StringReader(annotations.textMinusMarkup));
+            }, injector);
         }
     }
-
-    static String readToString(Reader reader) { 
+
+    static String readToString(Reader reader) {
         char[] arr = new char[8 * 1024];
         StringBuilder buffer = new StringBuilder();
         int numCharsRead;
@@ -467,15 +413,15 @@ static String readToString(Reader reader) {
                 buffer.append(arr, 0, numCharsRead);
             }
             reader.close();
-            return buffer.toString(); 
+            return buffer.toString();
         } catch (IOException e) {
             throw new UncheckedIOException("IO Error reading field content", e);
         }
-    } 
+    }
+
 
-    public static final class AnnotationsInjector extends TokenFilter {
-    
+    public static final class AnnotationsInjector extends TokenFilter {
+
         private AnnotatedText annotatedText;
         AnnotatedText.AnnotationToken nextAnnotationForInjection = null;
         private int currentAnnotationIndex = 0;
@@ -502,8 +448,8 @@ public void setAnnotations(AnnotatedText annotatedText) {
                 nextAnnotationForInjection = null;
             }
         }
-        
-        
+
+
         @Override
         public void reset() throws IOException {
@@ -512,7 +458,7 @@ public void reset() throws IOException {
             inputExhausted = false;
             super.reset();
         }
-        
+
         // Abstracts if we are pulling from some pre-cached buffer of
         // text tokens or directly from the wrapped TokenStream
         private boolean internalNextToken() throws IOException{
@@ -524,7 +470,7 @@ private boolean internalNextToken() throws IOException{
                 pendingStates.clear();
             }
             return true;
-        } 
+        }
         if(inputExhausted) {
             return false;
         }
@@ -579,28 +525,28 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th
         posLenAtt.setPositionLength(annotationPosLen);
         textOffsetAtt.setOffset(nextAnnotationForInjection.offset, nextAnnotationForInjection.endOffset);
         setType(nextAnnotationForInjection);
-        
+
         // We may have multiple annotations at this location - stack them up
         final int annotationOffset = nextAnnotationForInjection.offset;
         final AnnotatedText.AnnotationToken firstAnnotationAtThisPos = nextAnnotationForInjection;
         while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) {
-            
+
             setType(nextAnnotationForInjection);
             termAtt.resizeBuffer(nextAnnotationForInjection.value.length());
             termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length());
-            
+
             if (nextAnnotationForInjection == firstAnnotationAtThisPos) {
                 posAtt.setPositionIncrement(firstSpannedTextPosInc);
                 //Put at the head of the queue of tokens to be emitted
-                pendingStates.add(0, captureState()); 
+                pendingStates.add(0, captureState());
             } else {
-                posAtt.setPositionIncrement(0); 
+                posAtt.setPositionIncrement(0);
                 //Put after the head of the queue of tokens to be emitted
-                pendingStates.add(1, captureState()); 
+                pendingStates.add(1, captureState());
             }
-            
-            
+
+
             // Flag the inject annotation as null to prevent re-injection.
             currentAnnotationIndex++;
             if (currentAnnotationIndex < annotatedText.numAnnotations()) {
@@ -614,7 +560,7 @@
         }
     }
 
-    
+
     public static final class AnnotatedTextFieldType extends StringFieldType {
 
@@ -625,7 +571,7 @@ public AnnotatedTextFieldType() {
         protected AnnotatedTextFieldType(AnnotatedTextFieldType ref) {
             super(ref);
         }
-        
+
         @Override
         public void setIndexAnalyzer(NamedAnalyzer delegate) {
             if(delegate.analyzer() instanceof AnnotationAnalyzerWrapper){
@@ -655,7 +601,7 @@ public Query existsQuery(QueryShardContext context) {
                 return new NormsFieldExistsQuery(name());
             }
         }
-        
+
         @Override
         public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePosIncrements) throws IOException {
             PhraseQuery.Builder builder = new PhraseQuery.Builder();
@@ -678,7 +624,7 @@ public Query phraseQuery(String field, TokenStream stream, int slop, boolean ena
 
             return builder.build();
         }
-        
+
         @Override
         public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements)
                 throws IOException {
@@ -713,12 +659,12 @@ public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolea
                 mpqb.add(multiTerms.toArray(new Term[0]));
             }
             return mpqb.build();
-        } 
+        }
     }
-    
+
     private int positionIncrementGap;
     protected AnnotatedTextFieldMapper(String simpleName, AnnotatedTextFieldType fieldType, MappedFieldType defaultFieldType,
-                                int positionIncrementGap, 
+                                int positionIncrementGap,
                                 Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         assert fieldType.tokenized();
@@ -774,6 +720,6 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults,
 
         if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) {
             builder.field("position_increment_gap", positionIncrementGap);
-        } 
+        }
     }
 }
diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java
index 2fcf917ab1d79..ca29521802fe2 100644
--- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java
+++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java
@@ -57,7 +57,7 @@
 import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
 import static org.hamcrest.CoreMatchers.equalTo;
 
-public class AnnotatedTextHighlighterTests extends ESTestCase { 
+public class AnnotatedTextHighlighterTests extends ESTestCase {
 
     private void assertHighlightOneDoc(String fieldName, String []markedUpInputs,
             Query query, Locale locale, BreakIterator breakIterator,
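The AnnotatedText rework above leans on a Lucene 8 change: `TokenStreamComponents` can now be built from a `Consumer<Reader>` source instead of a `Tokenizer`, which is what lets the wrappers swap the marked-up `Reader` for a plain-text one without the removed `setReader`-overriding subclasses. A minimal sketch of the pattern under those assumptions (the analyzer and the markup-stripping step here are invented for illustration, not from this diff):

```java
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.UncheckedIOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;

// Sketch: an Analyzer whose TokenStreamComponents rewrites the incoming Reader
// before handing it to the real tokenizer, mirroring the wrapComponents() changes above.
public class SourceConsumerAnalyzerSketch extends Analyzer {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        TokenStream filtered = new LowerCaseFilter(tokenizer);
        // The Consumer<Reader> decides what the tokenizer actually consumes.
        return new TokenStreamComponents(r -> {
            String plainText = preprocess(r); // e.g. strip markup, as AnnotatedText.parse(...) does
            tokenizer.setReader(new StringReader(plainText));
        }, filtered);
    }

    private String preprocess(Reader reader) {
        // toy stand-in for markup stripping
        try {
            StringBuilder sb = new StringBuilder();
            char[] buf = new char[1024];
            for (int n = reader.read(buf); n != -1; n = reader.read(buf)) {
                sb.append(buf, 0, n);
            }
            return sb.toString().replace("*", "");
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}
```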
diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
index 6566063d220d3..dc14373026430 100644
--- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
+++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
@@ -63,7 +63,7 @@ public void testSizeEnabled() throws Exception {
         boolean points = false;
         for (IndexableField field : doc.rootDoc().getFields("_size")) {
             stored |= field.fieldType().stored();
-            points |= field.fieldType().pointDimensionCount() > 0;
+            points |= field.fieldType().pointIndexDimensionCount() > 0;
         }
         assertTrue(stored);
         assertTrue(points);
diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..3e54326a6c787
--- /dev/null
+++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+6bb87c96d76cdc70be77261d39376613b0a8860c
\ No newline at end of file
diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 4d9522f10de5b..0000000000000
--- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dce55e44af096cb9029cb26d22a14d8a9c5223ce
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..187572e525147
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+1b29b3e3b080ec32073c007a1940e5aa7b195316
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index c86294acf5a3e..0000000000000
--- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d1d941758dc91ea7c2d515dd97b5d9b23b0f1874
\ No newline at end of file
diff --git a/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..68553b80b1a1b
--- /dev/null
+++ b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+3757a90f73f505d40e6e200d1bacbff897f67548
\ No newline at end of file
diff --git a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 75200bc0c1525..0000000000000
--- a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e884b8ce62a2102b24bfdbe8911674cd5b0d06d9
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..75c05f55ed83b
--- /dev/null
+++ b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+c918cc5ac54e5a4dba4740e9e45a93ebd3c95c77
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index b1ae597fadfb7..0000000000000
--- a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3870972c07d7fa41a3bc58eb65952da53a16a406
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..afd8b925614fe
--- /dev/null
+++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+6cff1fa9ac25c840589d9a39a42ed4629b594cf4
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 02935671ce899..0000000000000
--- a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b8f0b73cfd01fc48735f1e06f16f7ccb47fc183e
\ No newline at end of file
diff --git a/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..6b525fa5ea64b
--- /dev/null
+++ b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+2a843337e03493ab5f3498b5dd232fa9abb9e765
\ No newline at end of file
diff --git a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index fdfab321a6791..0000000000000
--- a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1d253fae720355e2ff40d529d62c2b3de403d0d0
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..9487a7fa579a0
--- /dev/null
+++ b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+afda00bbee5fb8b4c36867eabb83267b3b2b8c10
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index d7c9cdf3e41d6..0000000000000
--- a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d9ca14bcda331a425d2d7c16022fdfd1c6942924
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..3e6fe1ce378c4
--- /dev/null
+++ b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+a2d8bc6a0486cfa6b4de8c1103017b35c0193544
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 93ec704aeaeb0..0000000000000
--- a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-200454bbfe5ec93d941d9a9d27703883122a4522
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..dbb72428046fd
--- /dev/null
+++ b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+79a3b80245a9cf00f24f5d6e298a8e1a887760f1
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index d57b6be7fbf31..0000000000000
--- a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-47915a125e54c845a4b540201cda88dc7612da08
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..db1d47c8307d0
--- /dev/null
+++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+37c9970ec38f64e7ccecbe17efbabdaabe8da2ea
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 0ed04b6f69b41..0000000000000
--- a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e5d49e1c6ee7550234539314e600e2893e13cb80
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..0e7ba7aeb9e94
--- /dev/null
+++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+7103c3482c728a9788922aa39e39a5ed2bdd3a11
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 41c6a4a243ed7..0000000000000
--- a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-68081b60905f1b53b3705b9cfa4403b8aba44352
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..bba0f7269e45e
--- /dev/null
+++ b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+89d389c1020fac58f462819ad822c9b09e52f563
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 63734717b2fbc..0000000000000
--- a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c99d56a453cecc7258300fd04b438713b944f1b9
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..1d8884aa8f23d
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+b62e34e522f3afa9c3f1655b97b995ff6ba2592d
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 3fa056da3db0a..0000000000000
--- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2471966478f829b6455556346014f02ff59f50c0
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..1ff50782c1780
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+0c92f6b03eb226586b431a834dca90a1f2cd85b8
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index bd3d2e719a0ae..0000000000000
--- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-46e012be699251306ad13f4582c30d79cea4b307
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1
new file mode 100644
index 0000000000000..dd4d9e0665e6c
--- /dev/null
+++ b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1
@@ -0,0 +1 @@
+3a659287ba728f7a0d81694ce32e9ef741a13c19
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1
deleted file mode 100644
index 8a4fc23cfcdae..0000000000000
--- a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dea19dd9e971d2a0171e7d78662f732b45148a27
\ No newline at end of file
diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java
index 563414171e98f..a4791e85ef3ca 100644
--- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java
+++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java
@@ -53,7 +53,7 @@ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight
     }
 
     @Override
-    void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQueries, float boost) throws IOException {
+    protected void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQueries, float boost) throws IOException {
         if (sourceQuery instanceof BoostQuery) {
             BoostQuery bq = (BoostQuery) sourceQuery;
             sourceQuery = bq.getQuery();
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java
index 1920db12117d4..1010c917eca82 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java
@@ -38,10 +38,10 @@
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
@@ -609,7 +609,7 @@ public void setMaxNumTokensParsed(int i) {
     public Query like(int docNum) throws IOException {
         if (fieldNames == null) {
             // gather list of valid fields from lucene
-            Collection<String> fields = MultiFields.getIndexedFields(ir);
+            Collection<String> fields = FieldInfos.getIndexedFields(ir);
             fieldNames = fields.toArray(new String[fields.size()]);
         }
 
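`MultiFields.getIndexedFields` is gone in the new snapshot as well; the indexed field names now come from `FieldInfos`, as the XMoreLikeThis hunk above shows. A one-method sketch of the replacement call (the wrapper class is illustrative):

```java
import java.util.Collection;

import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexReader;

public class IndexedFieldsLookupSketch {
    // Lucene 7.x: MultiFields.getIndexedFields(reader)
    // Lucene 8.x: FieldInfos.getIndexedFields(reader)
    static String[] indexedFieldNames(IndexReader reader) {
        Collection<String> fields = FieldInfos.getIndexedFields(reader);
        return fields.toArray(new String[0]);
    }
}
```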
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
index 167ec9ce26b33..c7e11e85f7da5 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
@@ -54,13 +54,13 @@ final class TranslogLeafReader extends LeafReader {
     private final Translog.Index operation;
     private static final FieldInfo FAKE_SOURCE_FIELD
         = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private static final FieldInfo FAKE_ROUTING_FIELD
         = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private static final FieldInfo FAKE_ID_FIELD
         = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(),
-        0, 0, false);
+        0, 0, 0, false);
     private final Version indexVersionCreated;
 
     TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) {
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
index 04480de70a8e4..eaafeefa7e0dd 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java
@@ -409,7 +409,7 @@ protected final void failIfNoDocValues() {
     }
 
     protected final void failIfNotIndexed() {
-        if (indexOptions() == IndexOptions.NONE && pointDimensionCount() == 0) {
+        if (indexOptions() == IndexOptions.NONE && pointDataDimensionCount() == 0) {
             // we throw an IAE rather than an ISE so that it translates to a 4xx code rather than 5xx code on the http layer
             throw new IllegalArgumentException("Cannot search on field [" + name() + "] since it is not indexed.");
         }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
index 7851bb1655ad0..d0419a0e44b24 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
@@ -268,7 +268,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) {
 
         @Override
         protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
-            return new TokenStreamComponents(components.getTokenizer(), new FixedShingleFilter(components.getTokenStream(), 2));
+            return new TokenStreamComponents(components.getSource(), new FixedShingleFilter(components.getTokenStream(), 2));
         }
     }
 
@@ -293,7 +293,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) {
         @Override
         protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
             TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false);
-            return new TokenStreamComponents(components.getTokenizer(), filter);
+            return new TokenStreamComponents(components.getSource(), filter);
         }
     }
 
org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.memory.MemoryIndex; @@ -98,7 +99,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Engine.Searcher searcher = indexShard.acquireSearcher("term_vector")) { - Fields topLevelFields = MultiFields.getFields(get.searcher() != null ? get.searcher().reader() : searcher.reader()); + Fields topLevelFields = fields(get.searcher() != null ? get.searcher().reader() : searcher.reader()); DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); /* from an artificial document */ if (request.doc() != null) { @@ -152,6 +153,25 @@ else if (docIdAndVersion != null) { return termVectorsResponse; } + public static Fields fields(IndexReader reader) { + return new Fields() { + @Override + public Iterator<String> iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) throws IOException { + return MultiTerms.getTerms(reader, field); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + }; + } + private static void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) { Set<String> fieldNames = new HashSet<>(); for (String pattern : request.selectedFields()) { @@ -270,7 +290,7 @@ private static Fields generateTermVectors(IndexShard indexShard, Map<String, Object> diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ public Suggestion<? extends Entry<? extends Option>> innerExecute(String name, P for (int i = 0; i < numGenerators; i++) { PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i); DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker(); - Terms terms = MultiFields.getTerms(indexReader, generator.field()); + Terms terms = MultiTerms.getTerms(indexReader, generator.field()); if (terms != null) { gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(), indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms)); } } final String suggestField = suggestion.getField(); - final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField); + final Terms suggestTerms = MultiTerms.getTerms(indexReader, suggestField); if (gens.size() > 0 && suggestTerms != null) { final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit()); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index 1bdf1c90d7d09..b13f33f76394b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; @@ -45,7 +45,7 @@ public abstract class WordScorer { private final boolean useTotalTermFreq; public WordScorer(IndexReader reader, String field, double realWordLikelyHood, BytesRef separator) throws IOException { - this(reader, MultiFields.getTerms(reader, field), field,
realWordLikelyHood, separator); + this(reader, MultiTerms.getTerms(reader, field), field, realWordLikelyHood, separator); } public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index d16bdc444e6e7..97921f57ca592 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -77,7 +77,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(8, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(1457654400000L, pointField.numericValue().longValue()); @@ -128,7 +128,7 @@ public void testNoDocValues() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); } public void testStore() throws Exception { @@ -150,7 +150,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); IndexableField storedField = fields[2]; @@ -304,7 +304,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(8, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(1457654400000L, pointField.numericValue().longValue()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index a20c88fe69366..c5eded8f5ab11 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -78,7 +78,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(16, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); @@ -129,7 +129,7 @@ public void testNoDocValues() throws Exception { IndexableField[] fields = 
doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); } @@ -152,7 +152,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_SET, dvField.fieldType().docValuesType()); IndexableField storedField = fields[2]; @@ -240,7 +240,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(16, pointField.fieldType().pointNumBytes()); assertFalse(pointField.fieldType().stored()); assertEquals(new BytesRef(InetAddressPoint.encode(InetAddresses.forString("::1"))), pointField.binaryValue()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java index e16b04748a18b..2c70b25d6a446 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java @@ -71,7 +71,7 @@ public void testStoreCidr() throws Exception { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); String strVal = diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index 9167c0d5a7d97..8b8e174dba83c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -66,7 +66,7 @@ public void doTestDefaults(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; @@ -117,7 +117,7 @@ public void doTestNoDocValues(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); } @@ -141,7 +141,7 @@ public void 
doTestStore(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -170,7 +170,7 @@ public void doTestCoerce(String type) throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -344,7 +344,7 @@ protected void doTestNullValue(String type) throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(123, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 00068f76e753d..1f8b0b58af813 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -136,7 +136,7 @@ public void doTestDefaults(String type) throws Exception { assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); } @@ -188,7 +188,7 @@ protected void doTestNoDocValues(String type) throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); } @Override @@ -216,7 +216,7 @@ protected void doTestStore(String type) throws Exception { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); String strVal = "5"; @@ -255,7 +255,7 @@ public void doTestCoerce(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); // date_range ignores the coerce parameter and epoch_millis date format truncates 
floats (see issue: #14641) if (type.equals("date_range") == false) { @@ -353,7 +353,7 @@ protected void doTestNullValue(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); storedField = fields[2]; assertTrue(storedField.fieldType().stored()); @@ -406,7 +406,7 @@ public void doTestNoBounds(String type) throws IOException { IndexableField dvField = fields[0]; assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType()); IndexableField pointField = fields[1]; - assertEquals(2, pointField.fieldType().pointDimensionCount()); + assertEquals(2, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); IndexableField storedField = fields[2]; assertTrue(storedField.fieldType().stored()); diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 69bb5943a7c98..1067ed62db46e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -21,9 +21,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedBinaryTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -52,7 +49,6 @@ import org.hamcrest.Matcher; import java.io.IOException; -import java.io.Reader; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -69,7 +65,7 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuilder> { protected Map<String, MatchQueryBuilder> getAlternateVersions() { Map<String, MatchQueryBuilder> alternateVersions = new HashMap<>(); MatchQueryBuilder matchQuery = new MatchQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)); String contentString = "{\n" + - " \"match\" : {\n" + - " \"" + matchQuery.fieldName() + "\" : \"" + matchQuery.value() + "\"\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"" + matchQuery.fieldName() + "\" : \"" + matchQuery.value() + "\"\n" + + " }\n" + + "}"; alternateVersions.put(contentString, matchQuery); return alternateVersions; } @@ -238,7 +234,7 @@ public void testIllegalValues() { { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> matchQuery.maxExpansions(randomIntBetween(-10, 0))); + () -> matchQuery.maxExpansions(randomIntBetween(-10, 0))); assertEquals("[match] requires maxExpansions to be positive.", e.getMessage()); } @@ -261,20 +257,20 @@ public void testIllegalValues() { public void testSimpleMatchQuery() throws IOException { String json = "{\n" + - " \"match\" : {\n" + - " \"message\" : {\n" + - " \"query\" : \"to be or not to be\",\n" + - " \"operator\" : \"AND\",\n" + - " \"prefix_length\" : 0,\n" + - " \"max_expansions\" : 50,\n" + - " \"fuzzy_transpositions\" : true,\n" + - " \"lenient\" : false,\n" + - " \"zero_terms_query\" : \"ALL\",\n" + - " \"auto_generate_synonyms_phrase_query\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " 
\"message\" : {\n" + + " \"query\" : \"to be or not to be\",\n" + + " \"operator\" : \"AND\",\n" + + " \"prefix_length\" : 0,\n" + + " \"max_expansions\" : 50,\n" + + " \"fuzzy_transpositions\" : true,\n" + + " \"lenient\" : false,\n" + + " \"zero_terms_query\" : \"ALL\",\n" + + " \"auto_generate_synonyms_phrase_query\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}"; MatchQueryBuilder qb = (MatchQueryBuilder) parseQuery(json); checkGeneratedJson(json, qb); @@ -287,14 +283,14 @@ public void testFuzzinessOnNonStringField() throws Exception { query.fuzziness(randomFuzziness(INT_FIELD_NAME)); QueryShardContext context = createShardContext(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> query.toQuery(context)); + () -> query.toQuery(context)); assertEquals("Can only use fuzzy queries on keyword and text fields - not on [mapped_int] which is of type [integer]", - e.getMessage()); + e.getMessage()); query.analyzer("keyword"); // triggers a different code path e = expectThrows(IllegalArgumentException.class, - () -> query.toQuery(context)); + () -> query.toQuery(context)); assertEquals("Can only use fuzzy queries on keyword and text fields - not on [mapped_int] which is of type [integer]", - e.getMessage()); + e.getMessage()); query.lenient(true); query.toQuery(context); // no exception @@ -313,43 +309,43 @@ public void testExactOnUnsupportedField() throws Exception { public void testParseFailsWithMultipleFields() throws IOException { String json = "{\n" + - " \"match\" : {\n" + - " \"message1\" : {\n" + - " \"query\" : \"this is a test\"\n" + - " },\n" + - " \"message2\" : {\n" + - " \"query\" : \"this is a test\"\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : {\n" + + " \"query\" : \"this is a test\"\n" + + " },\n" + + " \"message2\" : {\n" + + " \"query\" : \"this is a test\"\n" + + " }\n" + + " }\n" + + "}"; ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); String shortJson = "{\n" + - " \"match\" : {\n" + - " \"message1\" : \"this is a test\",\n" + - " \"message2\" : \"this is a test\"\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : \"this is a test\",\n" + + " \"message2\" : \"this is a test\"\n" + + " }\n" + + "}"; e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[match] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } public void testParseFailsWithTermsArray() throws Exception { String json1 = "{\n" + - " \"match\" : {\n" + - " \"message1\" : {\n" + - " \"query\" : [\"term1\", \"term2\"]\n" + - " }\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : {\n" + + " \"query\" : [\"term1\", \"term2\"]\n" + + " }\n" + + " }\n" + + "}"; expectThrows(ParsingException.class, () -> parseQuery(json1)); String json2 = "{\n" + - " \"match\" : {\n" + - " \"message1\" : [\"term1\", \"term2\"]\n" + - " }\n" + - "}"; + " \"match\" : {\n" + + " \"message1\" : [\"term1\", \"term2\"]\n" + + " }\n" + + "}"; expectThrows(IllegalStateException.class, () -> parseQuery(json2)); } @@ -364,9 +360,9 @@ public void testExceptionUsingAnalyzerOnNumericField() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge("_doc", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef( - "_doc", - 
"string_boost", "type=text,boost=4", "string_no_pos", - "type=text,index_options=docs")) + "_doc", + "string_boost", "type=text,boost=4", "string_no_pos", + "type=text,index_options=docs")) ), MapperService.MergeReason.MAPPING_UPDATE); } @@ -408,26 +404,18 @@ public void testMaxBooleanClause() { query.setAnalyzer(new MockGraphAnalyzer(createGiantGraphMultiTerms())); expectThrows(BooleanQuery.TooManyClauses.class, () -> query.parse(Type.PHRASE, STRING_FIELD_NAME, "")); } - + private static class MockGraphAnalyzer extends Analyzer { - final CannedBinaryTokenStream.BinaryToken[] tokens; - private MockGraphAnalyzer(CannedBinaryTokenStream.BinaryToken[] tokens ) { - this.tokens = tokens; + CannedBinaryTokenStream tokenStream; + + MockGraphAnalyzer(CannedBinaryTokenStream.BinaryToken[] tokens) { + this.tokenStream = new CannedBinaryTokenStream(tokens); } + @Override protected TokenStreamComponents createComponents(String fieldName) { - Tokenizer tokenizer = new MockTokenizer(MockTokenizer.SIMPLE, true); - return new TokenStreamComponents(tokenizer) { - @Override - public TokenStream getTokenStream() { - return new CannedBinaryTokenStream(tokens); - } - - @Override - protected void setReader(final Reader reader) { - } - }; + return new TokenStreamComponents(r -> {}, tokenStream); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index 72898dd3911cd..1a4e69af25342 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.index.Fields; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -256,7 +255,7 @@ private static Fields generateFields(String[] fieldNames, String text) throws IO for (String fieldName : fieldNames) { index.addField(fieldName, text, new WhitespaceAnalyzer()); } - return MultiFields.getFields(index.createSearcher().getIndexReader()); + return index.createSearcher().getIndexReader().getTermVectors(0); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java index e3ae802baba9b..72bd6d1fe2c87 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafFieldsLookupTests.java @@ -56,7 +56,7 @@ public void setUp() throws Exception { when(mapperService.fullName("alias")).thenReturn(fieldType); FieldInfo mockFieldInfo = new FieldInfo("field", 1, false, false, true, - IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, false); + IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false); LeafReader leafReader = mock(LeafReader.class); doAnswer(invocation -> { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java index 171bb0bf1697f..d819d880c86d3 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java @@ -36,7 +36,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.search.spell.DirectSpellChecker; import org.apache.lucene.search.spell.SuggestMode; import org.apache.lucene.store.Directory; @@ -110,7 +110,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + WordScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); @@ -135,7 +135,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE)); suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections; @@ -159,7 +159,7 @@ protected TokenStreamComponents createComponents(String fieldName) { // Test some of the highlighting corner cases suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections; @@ -196,7 +196,7 @@ protected TokenStreamComponents createComponents(String fieldName) { spellchecker.setMinPrefix(1); spellchecker.setMinQueryLength(1); suggester = new NoisyChannelSpellChecker(0.85); - wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; @@ -204,7 +204,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain america")); generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); @@ -212,7 +212,7 @@ protected TokenStreamComponents createComponents(String fieldName) { // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, 
ir, 0.85, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); @@ -280,7 +280,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); DirectSpellChecker spellchecker = new DirectSpellChecker(); @@ -288,7 +288,7 @@ protected TokenStreamComponents createComponents(String fieldName) { DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10); DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, - 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse")); + 0.95, 10, wrapper, wrapper, MultiTerms.getTerms(ir, "body_reverse")); CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse); Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, @@ -388,7 +388,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + WordScorer wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); @@ -406,7 +406,7 @@ protected TokenStreamComponents createComponents(String fieldName) { assertThat(corrections.length, equalTo(0)); // assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape")); - wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections; @@ -457,20 +457,20 @@ protected TokenStreamComponents createComponents(String fieldName) { spellchecker.setMinPrefix(1); spellchecker.setMinQueryLength(1); suggester = new NoisyChannelSpellChecker(0.95); - wordScorer = new LinearInterpolatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, + wordScorer = new LinearInterpolatingScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, - 10, null, analyzer, MultiFields.getTerms(ir, "body")); + 10, null, analyzer, 
MultiTerms.getTerms(ir, "body")); corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections; assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america")); - wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, + wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4); corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections; @@ -492,7 +492,7 @@ public void testFewDocsEgdeCase() throws Exception { } try (DirectoryReader ir = DirectoryReader.open(dir)) { - WordScorer wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.95d, + WordScorer wordScorer = new StupidBackoffScorer(ir, MultiTerms.getTerms(ir, "field"), "field", 0.95d, new BytesRef(" "), 0.4f); NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker(); DirectSpellChecker spellchecker = new DirectSpellChecker(); diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java index 5923cd3332e5e..a65c75817a816 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/SmoothingModelTestCase.java @@ -28,7 +28,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lucene.BytesRefs; @@ -118,7 +118,7 @@ public void testBuildWordScorer() throws IOException { writer.addDocument(doc); DirectoryReader ir = DirectoryReader.open(writer); - WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiFields.getTerms(ir, "field"), "field", 0.9d, + WordScorer wordScorer = testModel.buildWordScorerFactory().newScorer(ir, MultiTerms.getTerms(ir, "field"), "field", 0.9d, BytesRefs.toBytesRef(" ")); assertWordScorer(wordScorer, testModel); } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 70a42032ea469..63ec090dcc65b 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -77,7 +77,6 @@ private static String toCamelCase(String s) { .put("edgengram", MovedToAnalysisCommon.class) .put("keyword", MovedToAnalysisCommon.class) .put("letter", MovedToAnalysisCommon.class) - .put("lowercase", MovedToAnalysisCommon.class) .put("ngram", MovedToAnalysisCommon.class) .put("pathhierarchy", MovedToAnalysisCommon.class) .put("pattern", MovedToAnalysisCommon.class) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java index fa7bccf8b08c8..102bcde9dc77b 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java @@ -198,7 +198,7 @@ private SegmentCommitInfo syncSegment(SegmentCommitInfo segmentCommitInfo, LiveD List<FieldInfo> fieldInfoCopy = new ArrayList<>(fieldInfos.size()); for (FieldInfo fieldInfo : fieldInfos) { fieldInfoCopy.add(new FieldInfo(fieldInfo.name, fieldInfo.number, - false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, + false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, fieldInfo.attributes(), 0, 0, 0, fieldInfo.isSoftDeletesField())); } FieldInfos newFieldInfos = new FieldInfos(fieldInfoCopy.toArray(new FieldInfo[0])); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index d2f7d7bdb96b9..2dcfa093c008a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -148,7 +148,7 @@ public void testPoints() throws Exception { assertEquals(Integer.BYTES, points.getBytesPerDimension()); // number of dimensions - assertEquals(1, points.getNumDimensions()); + assertEquals(1, points.getNumIndexDimensions()); // walk the trees: we should see stuff in fieldA AtomicBoolean sawDoc = new AtomicBoolean(false); diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..68553b80b1a1b --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 75200bc0c1525..0000000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file
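Reviewer notes on the recurring Lucene 8 migrations in this diff, with short illustrative sketches. Class names, field names, and sample values in the sketches are hypothetical, not part of the change.

Lucene 8 removes MultiFields: the merged per-field terms view moves to MultiTerms.getTerms, and the indexed-field listing moves to FieldInfos.getIndexedFields, which is what XMoreLikeThis, TermVectorsService, and the phrase-suggest classes switch to above. A minimal before/after sketch, assuming an open IndexReader over an index with a text field named "body":

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.lucene.index.FieldInfos;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiTerms;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    class MultiTermsMigrationSketch {
        static void dumpBodyTerms(IndexReader reader) throws IOException {
            // Lucene 7: MultiFields.getIndexedFields(reader) and MultiFields.getTerms(reader, "body")
            Collection<String> indexedFields = FieldInfos.getIndexedFields(reader); // merged view across leaves
            System.out.println("indexed fields: " + indexedFields);
            Terms terms = MultiTerms.getTerms(reader, "body"); // null when the field has no terms
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator();
                while (termsEnum.next() != null) {
                    System.out.println(termsEnum.term().utf8ToString() + " docFreq=" + termsEnum.docFreq());
                }
            }
        }
    }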
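MultiFields.getFields(reader) is gone with no direct replacement, which is why TermVectorsService grows a small Fields adapter over MultiTerms and MoreLikeThisQueryBuilderTests switches to reading per-document term vectors from its single-document MemoryIndex. A sketch of the MemoryIndex variant (field name and text are illustrative):

    import java.io.IOException;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.index.Fields;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.memory.MemoryIndex;

    class MemoryIndexFieldsSketch {
        static Fields singleDocFields() throws IOException {
            MemoryIndex index = new MemoryIndex();
            index.addField("body", "some test text", new WhitespaceAnalyzer());
            IndexReader reader = index.createSearcher().getIndexReader();
            // A MemoryIndex reader holds exactly one document (doc 0), so its
            // term vectors can stand in for the removed top-level Fields view.
            return reader.getTermVectors(0);
        }
    }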
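The getTokenizer() to getSource() changes follow Lucene 8's reworked TokenStreamComponents, which is now built from a Consumer<Reader> source rather than a Tokenizer; that is also why MockGraphAnalyzer can hand a canned stream to new TokenStreamComponents(r -> {}, tokenStream) with a no-op source. A hedged sketch of the wrapping pattern TextFieldMapper uses (the shingle size of 2 mirrors the diff; the helper itself is illustrative):

    import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.shingle.FixedShingleFilter;

    class WrapComponentsSketch {
        static TokenStreamComponents wrapWithShingles(TokenStreamComponents components) {
            TokenStream shingled = new FixedShingleFilter(components.getTokenStream(), 2);
            // Reuse the original source callback; TokenStreamComponents no
            // longer exposes the Tokenizer itself.
            return new TokenStreamComponents(components.getSource(), shingled);
        }
    }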
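The pointDimensionCount() renames and the extra 0 threaded through every FieldInfo constructor come from Lucene splitting point dimensions into data dimensions (stored per value) and index dimensions (used to organize the BKD tree); for the ordinary point fields touched here the two counts are equal, and the constructor now takes both. A small sketch against the snapshot's API, assuming a plain one-dimensional LongPoint:

    import org.apache.lucene.document.LongPoint;

    class PointDimensionsSketch {
        public static void main(String[] args) {
            LongPoint point = new LongPoint("timestamp", 1457654400000L);
            // Both counts are 1 for an ordinary point field; they only diverge
            // for fields that index fewer dimensions than they store.
            System.out.println(point.fieldType().pointDataDimensionCount() + " data / "
                + point.fieldType().pointIndexDimensionCount() + " index dimensions");
        }
    }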
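The same split surfaces on PointValues, where getNumDimensions() becomes getNumIndexDimensions() alongside getNumDataDimensions(), as FieldSubsetReaderTests now asserts. A sketch of inspecting a leaf's points (the field name is illustrative):

    import java.io.IOException;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.PointValues;

    class PointValuesSketch {
        static void describePoints(LeafReader leaf) throws IOException {
            PointValues points = leaf.getPointValues("fieldA"); // null if the field has no points
            if (points != null) {
                System.out.println(points.getNumIndexDimensions() + " index dims, "
                    + points.getNumDataDimensions() + " data dims, "
                    + points.getBytesPerDimension() + " bytes per dim");
            }
        }
    }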