Remove 6.0 version constant uses (#41965)
This commit removes all uses of the 6.0 version constants (V_6_0_0 and V_6_0_1), since master no longer needs to know about them.
rjernst authored May 14, 2019
1 parent 45e1e59 commit 0e48bbb
Showing 64 changed files with 106 additions and 1,024 deletions.
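For context, version constants like the ones removed here exist so that serialization and upgrade code can branch on how old a peer node or index is. A minimal, hypothetical sketch of that pattern (the ExampleMessage class and its field are illustrative assumptions, not code from this repository):

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical message type; only the version check mirrors the real pattern.
class ExampleMessage implements Writeable {
    private final String newField = "example";

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Peers on 6.0.0 or later understand the new field; older peers never receive it.
        if (out.getVersion().onOrAfter(Version.V_6_0_0)) {
            out.writeOptionalString(newField);
        }
    }
}

Once no node or index that master supports can predate a constant, branches like the one above become dead code, and the constant itself can be dropped from Version.java, as the hunks below do for V_6_0_0 and V_6_0_1.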
@@ -38,7 +38,7 @@ protected org.elasticsearch.action.main.MainResponse createServerTestInstance()
ClusterName clusterName = new ClusterName(randomAlphaOfLength(10));
String nodeName = randomAlphaOfLength(10);
final String date = new Date(randomNonNegativeLong()).toString();
Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT);
Version version = VersionUtils.randomIndexCompatibleVersion(random());
Build build = new Build(
Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(),
version.toString()
@@ -261,7 +261,7 @@ Query percolateQuery(String name, PercolateQuery.QueryStore queryStore, List<Byt
}
Query filter = null;
if (excludeNestedDocuments) {
filter = Queries.newNonNestedFilter(indexVersion);
filter = Queries.newNonNestedFilter();
}
return new PercolateQuery(name, queryStore, documents, candidateQuery, searcher, filter, verifiedMatchesQuery);
}
@@ -30,7 +30,6 @@
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator;
import org.elasticsearch.Version;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.search.SearchHit;
@@ -73,8 +72,7 @@ static void innerHitsExecute(Query mainQuery,
for (PercolateQuery percolateQuery : percolateQueries) {
String fieldName = singlePercolateQuery ? FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
Query nonNestedQuery = Queries.newNonNestedFilter(Version.CURRENT);
Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(nonNestedQuery),
Weight weight = percolatorIndexSearcher.createWeight(percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter()),
ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
@@ -399,26 +399,6 @@ public void testCreateCandidateQuery() throws Exception {
assertThat(t.v1().clauses().get(2).getQuery().toString(), containsString(fieldName + ".extraction_result:failed"));
}

public void testCreateCandidateQuery_oldIndex() throws Exception {
addQueryFieldMappings();

MemoryIndex memoryIndex = new MemoryIndex(false);
memoryIndex.addField("field1", "value1", new WhitespaceAnalyzer());
IndexReader indexReader = memoryIndex.createSearcher().getIndexReader();

Tuple<BooleanQuery, Boolean> t = fieldType.createCandidateQuery(indexReader, Version.CURRENT);
assertTrue(t.v2());
assertEquals(2, t.v1().clauses().size());
assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(CoveringQuery.class));
assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class));

t = fieldType.createCandidateQuery(indexReader, Version.V_6_0_0);
assertTrue(t.v2());
assertEquals(2, t.v1().clauses().size());
assertThat(t.v1().clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class));
assertThat(t.v1().clauses().get(1).getQuery(), instanceOf(TermQuery.class));
}

public void testExtractTermsAndRanges_numberFields() throws Exception {
addQueryFieldMappings();

@@ -333,12 +333,6 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() {
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(0));
assertTermsEqual(result.extractions);

result = analyze(booleanQuery, Version.CURRENT);
assertThat(result.matchAllDocs, is(true));
assertThat(result.verified, is(false));
assertThat(result.minimumShouldMatch, equalTo(0));
assertTermsEqual(result.extractions);
}

public void testExactMatch_booleanQuery() {
@@ -165,20 +165,6 @@ setup:
docvalue_fields: [ "count" ]
- match: { hits.hits.0.fields.count: [1] }

---
"docvalue_fields with default format":
- skip:
features: warnings
- do:
warnings:
- "[use_field_mapping] is a special format that was only used to ease the transition to 7.x. It has become the default and shouldn't be set explicitly anymore."
search:
body:
docvalue_fields:
- field: "count"
format: "use_field_mapping"
- match: { hits.hits.0.fields.count: [1] }

---
"docvalue_fields with explicit format":

10 changes: 0 additions & 10 deletions server/src/main/java/org/elasticsearch/Version.java
@@ -46,12 +46,6 @@ public class Version implements Comparable<Version>, ToXContentFragment {
*/
public static final int V_EMPTY_ID = 0;
public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST);
public static final int V_6_0_0_ID = 6000099;
public static final Version V_6_0_0 =
new Version(V_6_0_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_0_1_ID = 6000199;
public static final Version V_6_0_1 =
new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_6_1_1_ID = 6010199;
@@ -212,10 +206,6 @@ public static Version fromId(int id) {
return V_6_1_1;
case V_6_1_0_ID:
return V_6_1_0;
case V_6_0_1_ID:
return V_6_0_1;
case V_6_0_0_ID:
return V_6_0_0;
case V_EMPTY_ID:
return V_EMPTY;
default:
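An aside on the packed IDs removed above: Elasticsearch version IDs appear to encode major, minor, revision, and build into a single integer, so 6000099 reads as 6.0.0 and 6000199 as 6.0.1, with a trailing 99 marking a release build. A hypothetical decoder, for illustration only (Version.fromId above is the real entry point):

// Illustrative only; mirrors how the IDs above appear to be packed.
static String describeVersionId(int id) {
    int major = id / 1_000_000;
    int minor = (id / 10_000) % 100;
    int revision = (id / 100) % 100;
    int build = id % 100;  // 99 appears to mark a release build
    return major + "." + minor + "." + revision + (build == 99 ? "" : "-" + build);
}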
@@ -31,7 +31,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
@@ -72,17 +71,9 @@ public static Query newNestedFilter() {

/**
* Creates a new non-nested docs query
* @param indexVersionCreated the index version created since newer indices can identify a parent field more efficiently
*/
public static Query newNonNestedFilter(Version indexVersionCreated) {
if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) {
return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME);
} else {
return new BooleanQuery.Builder()
.add(new MatchAllDocsQuery(), Occur.FILTER)
.add(newNestedFilter(), Occur.MUST_NOT)
.build();
}
public static Query newNonNestedFilter() {
return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME);
}

public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) {
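The Queries.newNonNestedFilter change above is the heart of this diff: any index the master branch can still open was created in 7.x or later, so the _primary_term doc-values field written by SeqNoFieldMapper is always present and the pre-6.1 fallback is dead code. A paraphrased sketch of the two behaviours, under the assumption that _primary_term is the value of SeqNoFieldMapper.PRIMARY_TERM_NAME:

import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;

// Sketch only: what the two branches of the removed method built, in plain Lucene.
class NonNestedFilterSketch {
    // 6.1+ indices: root (non-nested) documents are exactly those with a _primary_term doc value.
    static Query current() {
        return new DocValuesFieldExistsQuery("_primary_term");
    }

    // Pre-6.1 fallback, now unreachable: every document minus the nested ones.
    static Query legacy(Query nestedFilter) {
        return new BooleanQuery.Builder()
            .add(new MatchAllDocsQuery(), Occur.FILTER)
            .add(nestedFilter, Occur.MUST_NOT)
            .build();
    }
}

The call sites in the remaining hunks simply drop the index-created version argument, e.g. context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()).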
@@ -251,7 +251,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin
}

if (hasNested) {
warmUp.add(Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated()));
warmUp.add(Queries.newNonNestedFilter());
}

final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
@@ -140,7 +140,7 @@ public Query termsQuery(List<?> values, QueryShardContext context) {
.anyMatch(indexType::equals)) {
if (context.getMapperService().hasNested()) {
// type filters are expected not to match nested docs
return Queries.newNonNestedFilter(context.indexVersionCreated());
return Queries.newNonNestedFilter();
} else {
return new MatchAllDocsQuery();
}
@@ -281,7 +281,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
Query innerQuery;
ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
if (objectMapper == null) {
parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated()));
parentFilter = context.bitsetFilter(Queries.newNonNestedFilter());
} else {
parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter());
}
@@ -388,7 +388,7 @@ public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException {
SearchHit hit = hits[i];
Query rawParentFilter;
if (parentObjectMapper == null) {
rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
rawParentFilter = Queries.newNonNestedFilter();
} else {
rawParentFilter = parentObjectMapper.nestedTypeFilter();
}
@@ -41,7 +41,6 @@
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.lucene.search.Queries;
@@ -67,7 +66,7 @@ final class ShardSplittingQuery extends Query {
ShardSplittingQuery(IndexMetaData indexMetaData, int shardId, boolean hasNested) {
this.indexMetaData = indexMetaData;
this.shardId = shardId;
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null;
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer() : null;
}
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) {
@@ -339,9 +338,9 @@ public float matchCost() {
* than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is
* executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either.
*/
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
private static BitSetProducer newParentDocBitSetProducer() {
return context -> {
Query query = Queries.newNonNestedFilter(indexVersionCreated);
Query query = Queries.newNonNestedFilter();
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);
@@ -278,7 +278,7 @@ public Query buildFilteredQuery(Query query) {
&& typeFilter == null // when a _type filter is set, it will automatically exclude nested docs
&& new NestedHelper(mapperService()).mightMatchNestedDocs(query)
&& (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) {
filters.add(Queries.newNonNestedFilter(mapperService().getIndexSettings().getIndexVersionCreated()));
filters.add(Queries.newNonNestedFilter());
}

if (aliasFilter != null) {
@@ -66,7 +66,7 @@ public class NestedAggregator extends BucketsAggregator implements SingleBucketA
super(name, factories, context, parentAggregator, pipelineAggregators, metaData);

Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter()
: Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
: Queries.newNonNestedFilter();
this.parentFilter = context.bitsetFilterCache().getBitSetProducer(parentFilter);
this.childFilter = childObjectMapper.nestedTypeFilter();
this.collectsFromSingleBucket = collectsFromSingleBucket;
@@ -54,7 +54,7 @@ public ReverseNestedAggregator(String name, AggregatorFactories factories, Objec
throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
if (objectMapper == null) {
parentFilter = Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
parentFilter = Queries.newNonNestedFilter();
} else {
parentFilter = objectMapper.nestedTypeFilter();
}
@@ -190,7 +190,7 @@ public void execute(SearchContext context) {
private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {
if (context.mapperService().hasNested()) {
BitSet bits = context.bitsetFilterCache()
.getBitSetProducer(Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()))
.getBitSetProducer(Queries.newNonNestedFilter())
.getBitSet(subReaderContext);
if (!bits.get(subDocId)) {
return bits.nextSetBit(subDocId);
@@ -363,7 +363,7 @@ private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext contex
}
parentFilter = nestedParentObjectMapper.nestedTypeFilter();
} else {
parentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
parentFilter = Queries.newNonNestedFilter();
}

Query childFilter = nestedObjectMapper.nestedTypeFilter();
@@ -185,7 +185,7 @@ protected static Nested resolveNested(QueryShardContext context, NestedSortBuild
final ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
final Query parentQuery;
if (objectMapper == null) {
parentQuery = Queries.newNonNestedFilter(context.indexVersionCreated());
parentQuery = Queries.newNonNestedFilter();
} else {
parentQuery = objectMapper.nestedTypeFilter();
}
@@ -19,16 +19,13 @@

package org.elasticsearch.action.admin.cluster.settings;

import org.elasticsearch.Version;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
@@ -100,8 +97,4 @@ protected ClusterUpdateSettingsResponse createBlankInstance() {
return new ClusterUpdateSettingsResponse();
}

public void testOldSerialisation() throws IOException {
ClusterUpdateSettingsResponse original = createTestInstance();
assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0));
}
}
@@ -24,7 +24,6 @@
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.Matchers.equalTo;
@@ -48,7 +47,7 @@ public void testBwcSerialization() throws Exception {
{
final CloseIndexResponse response = randomResponse();
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
out.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0));
response.writeTo(out);

final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse();
@@ -65,7 +64,7 @@

final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
try (StreamInput in = out.bytes().streamInput()) {
in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
in.setVersion(randomVersionBetween(random(), Version.V_7_0_0, Version.V_7_1_0));
deserializedResponse.readFrom(in);
}
assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
@@ -20,14 +20,11 @@
package org.elasticsearch.action.admin.indices.rollover;


import org.elasticsearch.Version;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -131,9 +128,4 @@ protected RolloverResponse mutateInstance(RolloverResponse response) {
throw new UnsupportedOperationException();
}
}

public void testOldSerialisation() throws IOException {
RolloverResponse original = createTestInstance();
assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0));
}
}
@@ -41,7 +41,7 @@ protected MainResponse createTestInstance() {
ClusterName clusterName = new ClusterName(randomAlphaOfLength(10));
String nodeName = randomAlphaOfLength(10);
final String date = new Date(randomNonNegativeLong()).toString();
Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_1, Version.CURRENT);
Version version = VersionUtils.randomIndexCompatibleVersion(random());
Build build = new Build(
Build.Flavor.UNKNOWN, Build.Type.UNKNOWN, randomAlphaOfLength(8), date, randomBoolean(),
version.toString()