From 8b8c0c0b4daaa3c3aa7e406a493a778dc07b04ab Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 26 Apr 2018 09:58:17 +0200 Subject: [PATCH 01/24] Add additional shards routing info in ShardSearchRequest (#29533) This commit propagates the preference and routing of the original SearchRequest to the ShardSearchRequest. This information is then used to fix a bug in sliced scrolls when executed with a preference (or a routing). Instead of computing the slice query from the total number of shards in the index, this commit computes this number from the number of shards per index that participate in the request. Fixes #27550 --- .../search/AbstractSearchAsyncAction.java | 26 +- .../search/CanMatchPreFilterSearchPhase.java | 8 +- .../action/search/InitialSearchPhase.java | 4 +- .../SearchDfsQueryThenFetchAsyncAction.java | 7 +- .../SearchQueryThenFetchAsyncAction.java | 10 +- .../action/search/TransportSearchAction.java | 22 +- .../cluster/routing/PlainShardsIterator.java | 2 +- .../cluster/routing/ShardRouting.java | 6 +- .../search/DefaultSearchContext.java | 16 +- .../elasticsearch/search/SearchService.java | 4 +- .../internal/ShardSearchLocalRequest.java | 48 +- .../search/internal/ShardSearchRequest.java | 16 +- .../internal/ShardSearchTransportRequest.java | 22 +- .../search/slice/SliceBuilder.java | 59 ++- .../AbstractSearchAsyncActionTests.java | 15 +- .../CanMatchPreFilterSearchPhaseTests.java | 9 +- .../action/search/SearchAsyncActionTests.java | 3 + .../cluster/routing/PrimaryTermsTests.java | 2 +- .../cluster/routing/RoutingTableTests.java | 2 +- .../index/SearchSlowLogTests.java | 10 + .../elasticsearch/routing/AliasRoutingIT.java | 2 +- .../routing/SimpleRoutingIT.java | 4 +- .../search/DefaultSearchContextTests.java | 12 +- .../search/SearchServiceTests.java | 30 +- .../ShardSearchTransportRequestTests.java | 6 +- .../search/slice/SearchSliceIT.java | 135 +++--- .../search/slice/SliceBuilderTests.java | 409 ++++++++++++------ 27 files changed, 605 insertions(+), 284 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index aad2638bd9de3..91aec1171dcd6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -37,8 +37,10 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.transport.Transport; +import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -62,6 +64,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten private final long clusterStateVersion; private final Map<String, AliasFilter> aliasFilter; private final Map<String, Float> concreteIndexBoosts; + private final Map<String, Set<String>> indexRoutings; private final SetOnce<AtomicArray<ShardSearchFailure>> shardFailures = new SetOnce<>(); private final Object shardFailuresMutex = new Object(); private final AtomicInteger successfulOps = new AtomicInteger(); @@ -72,6 +75,7 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService, BiFunction<String, String, Transport.Connection> nodeIdToConnection, Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts, + Map<String, Set<String>> indexRoutings, Executor executor, SearchRequest request, ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, @@ -89,6 +93,7 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS this.clusterStateVersion = clusterStateVersion; this.concreteIndexBoosts = concreteIndexBoosts; this.aliasFilter = aliasFilter; + this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; } @@ -128,17 +133,17 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); - assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; + assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; if (allowPartialResults == false && shardFailures.get() != null ){ if (logger.isDebugEnabled()) { final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", + logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", shardSearchFailures.length, getName()), cause); } - onPhaseFailure(currentPhase, "Partial shards failure", null); - } else { + onPhaseFailure(currentPhase, "Partial shards failure", null); + } else { if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() .map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(",")); @@ -271,14 +276,14 @@ public final SearchRequest getRequest() { @Override public final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { - + ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; if (allowPartialResults == false && failures.length > 0){ - raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); - } - + raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); + } + return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), skippedOps.get(), buildTookInMillis(), failures, clusters); } @@ -318,8 +323,11 @@ public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIter AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID()); assert filter != null; float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST); + String indexName = shardIt.shardId().getIndex().getName(); + final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet()) + .toArray(new String[0]); return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(), - filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias); + filter, indexBoost, timeProvider.getAbsoluteStartMillis(), clusterAlias, routings); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index fe42d50393635..0873ff40f7500 
100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -27,6 +27,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; import java.util.function.Function; @@ -47,6 +48,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, + Map> indexRoutings, Executor executor, SearchRequest request, ActionListener listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, @@ -56,9 +58,9 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction 0) { int maxConcurrentShardRequests = Math.min(this.maxConcurrentShardRequests, shardsIts.size()); final boolean success = shardExecutionIndex.compareAndSet(0, maxConcurrentShardRequests); - assert success; + assert success; assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; if (request.allowPartialSearchResults() == false) { final StringBuilder missingShards = new StringBuilder(); @@ -140,7 +140,7 @@ public final void run() throws IOException { final SearchShardIterator shardRoutings = shardsIts.get(index); if (shardRoutings.size() == 0) { if(missingShards.length() >0 ){ - missingShards.append(", "); + missingShards.append(", "); } missingShards.append(shardRoutings.shardId()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 9bcbe1c8e6760..0782fbb310b65 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; @@ -37,11 +38,13 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction SearchDfsQueryThenFetchAsyncAction(final Logger logger, final SearchTransportService searchTransportService, final BiFunction nodeIdToConnection, final Map aliasFilter, - final Map concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor, + final Map concreteIndexBoosts, final Map> indexRoutings, + final SearchPhaseController searchPhaseController, final Executor executor, final SearchRequest request, final ActionListener listener, final GroupShardsIterator shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, final long clusterStateVersion, final SearchTask task, SearchResponse.Clusters clusters) { - super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, + super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings, + executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task, new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters); this.searchPhaseController = searchPhaseController; diff --git 
a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index b7669312b0088..bbd84011de00b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.transport.Transport; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.BiFunction; @@ -37,13 +38,14 @@ final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction nodeIdToConnection, final Map aliasFilter, - final Map concreteIndexBoosts, final SearchPhaseController searchPhaseController, final Executor executor, + final Map concreteIndexBoosts, final Map> indexRoutings, + final SearchPhaseController searchPhaseController, final Executor executor, final SearchRequest request, final ActionListener listener, final GroupShardsIterator shardsIts, final TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, SearchResponse.Clusters clusters) { - super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, request, listener, - shardsIts, timeProvider, clusterStateVersion, task, searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), - request.getMaxConcurrentShardRequests(), clusters); + super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, indexRoutings, + executor, request, listener, shardsIts, timeProvider, clusterStateVersion, task, + searchPhaseController.newSearchPhaseResults(request, shardsIts.size()), request.getMaxConcurrentShardRequests(), clusters); this.searchPhaseController = searchPhaseController; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index bd533ce7b097a..6b39af478f432 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -297,6 +297,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); + routingMap = routingMap == null ? 
Collections.emptyMap() : Collections.unmodifiableMap(routingMap); String[] concreteIndices = new String[indices.length]; for (int i = 0; i < indices.length; i++) { concreteIndices[i] = indices[i].getName(); @@ -350,7 +351,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea } boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators); searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), - Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener, preFilterSearchShards, clusters).start(); + Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, routingMap, listener, preFilterSearchShards, clusters).start(); } private boolean shouldPreFilterSearchShards(SearchRequest searchRequest, GroupShardsIterator shardIterators) { @@ -380,17 +381,20 @@ private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchReque GroupShardsIterator shardIterators, SearchTimeProvider timeProvider, BiFunction connectionLookup, - long clusterStateVersion, Map aliasFilter, + long clusterStateVersion, + Map aliasFilter, Map concreteIndexBoosts, - ActionListener listener, boolean preFilter, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, SearchResponse.Clusters clusters) { Executor executor = threadPool.executor(ThreadPool.Names.SEARCH); if (preFilter) { return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators, + aliasFilter, concreteIndexBoosts, indexRoutings, executor, searchRequest, listener, shardIterators, timeProvider, clusterStateVersion, task, (iter) -> { AbstractSearchAsyncAction action = searchAsyncAction(task, searchRequest, iter, timeProvider, connectionLookup, - clusterStateVersion, aliasFilter, concreteIndexBoosts, listener, false, clusters); + clusterStateVersion, aliasFilter, concreteIndexBoosts, indexRoutings, listener, false, clusters); return new SearchPhase(action.getName()) { @Override public void run() throws IOException { @@ -403,14 +407,14 @@ public void run() throws IOException { switch (searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, - timeProvider, clusterStateVersion, task, clusters); + aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener, + shardIterators, timeProvider, clusterStateVersion, task, clusters); break; case QUERY_AND_FETCH: case QUERY_THEN_FETCH: searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, - timeProvider, clusterStateVersion, task, clusters); + aliasFilter, concreteIndexBoosts, indexRoutings, searchPhaseController, executor, searchRequest, listener, + shardIterators, timeProvider, clusterStateVersion, task, clusters); break; default: throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]"); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java index 6cb1989a8dd02..e9a99b7b456c4 100644 
--- a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardsIterator.java @@ -24,7 +24,7 @@ /** * A simple {@link ShardsIterator} that iterates a list or sub-list of - * {@link ShardRouting shard routings}. + * {@link ShardRouting shard routings}. */ public class PlainShardsIterator implements ShardsIterator { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index be1213ad134f1..6a9a105b6c432 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -38,7 +38,7 @@ /** * {@link ShardRouting} immutably encapsulates information about shard - * routings like id, state, version, etc. + * routings like id, state, version, etc. */ public final class ShardRouting implements Writeable, ToXContentObject { @@ -477,7 +477,7 @@ public boolean isRelocationTargetOf(ShardRouting other) { "ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]"; @@ -504,7 +504,7 @@ public boolean isRelocationSourceOf(ShardRouting other) { "ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation source but primary flag is different.
This [" + this + "], target [" + other + "]"; diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 1356a1458a2ed..d681a186892db 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -25,8 +25,10 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; import org.apache.lucene.util.Counter; +import org.elasticsearch.Version; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; @@ -91,6 +93,7 @@ final class DefaultSearchContext extends SearchContext { private final Engine.Searcher engineSearcher; private final BigArrays bigArrays; private final IndexShard indexShard; + private final ClusterService clusterService; private final IndexService indexService; private final ContextIndexSearcher searcher; private final DfsSearchResult dfsResult; @@ -120,6 +123,7 @@ final class DefaultSearchContext extends SearchContext { // filter for sliced scroll private SliceBuilder sliceBuilder; private SearchTask task; + private final Version minNodeVersion; /** @@ -152,9 +156,10 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private FetchPhase fetchPhase; - DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, - IndexService indexService, IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, - TimeValue timeout, FetchPhase fetchPhase, String clusterAlias) { + DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, + Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService, + IndexShard indexShard, BigArrays bigArrays, Counter timeEstimateCounter, TimeValue timeout, + FetchPhase fetchPhase, String clusterAlias, Version minNodeVersion) { this.id = id; this.request = request; this.fetchPhase = fetchPhase; @@ -168,9 +173,11 @@ final class DefaultSearchContext extends SearchContext { this.fetchResult = new FetchSearchResult(id, shardTarget); this.indexShard = indexShard; this.indexService = indexService; + this.clusterService = clusterService; this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeout = timeout; + this.minNodeVersion = minNodeVersion; queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis, clusterAlias); queryShardContext.setTypes(request.types()); @@ -278,8 +285,7 @@ && new NestedHelper(mapperService()).mightMatchNestedDocs(query) } if (sliceBuilder != null) { - filters.add(sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(), - queryShardContext.getIndexSettings().getNumberOfShards())); + filters.add(sliceBuilder.toFilter(clusterService, request, queryShardContext, minNodeVersion)); } if (filters.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 
a742a3a06ae13..ed7f98c3b0b12 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -616,8 +616,8 @@ private DefaultSearchContext createSearchContext(ShardSearchRequest request, Tim Engine.Searcher engineSearcher = indexShard.acquireSearcher("search"); final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, - engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase, - request.getClusterAlias()); + engineSearcher, clusterService, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, + fetchPhase, request.getClusterAlias(), clusterService.state().nodes().getMinNodeVersion()); boolean success = false; try { // we clone the query shard context here just for rewriting otherwise we diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index af52924a2de2c..52892d4d52eb9 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -28,13 +28,10 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.io.IOException; @@ -61,7 +58,6 @@ */ public class ShardSearchLocalRequest implements ShardSearchRequest { - private String clusterAlias; private ShardId shardId; private int numberOfShards; @@ -74,17 +70,18 @@ public class ShardSearchLocalRequest implements ShardSearchRequest { private Boolean requestCache; private long nowInMillis; private boolean allowPartialSearchResults; - + private String[] indexRoutings = Strings.EMPTY_ARRAY; + private String preference; private boolean profile; ShardSearchLocalRequest() { } ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) { + AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias, String[] indexRoutings) { this(shardId, numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost, - searchRequest.allowPartialSearchResults()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter, indexBoost, + searchRequest.allowPartialSearchResults(), indexRoutings, searchRequest.preference()); // If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted // at this stage. Any NPEs in the above are therefore an error in request preparation logic. 
assert searchRequest.allowPartialSearchResults() != null; @@ -102,7 +99,8 @@ public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types, - Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults) { + Boolean requestCache, AliasFilter aliasFilter, float indexBoost, boolean allowPartialSearchResults, + String[] indexRoutings, String preference) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; @@ -112,6 +110,8 @@ public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType s this.aliasFilter = aliasFilter; this.indexBoost = indexBoost; this.allowPartialSearchResults = allowPartialSearchResults; + this.indexRoutings = indexRoutings; + this.preference = preference; } @@ -169,18 +169,28 @@ public long nowInMillis() { public Boolean requestCache() { return requestCache; } - + @Override public Boolean allowPartialSearchResults() { return allowPartialSearchResults; } - + @Override public Scroll scroll() { return scroll; } + @Override + public String[] indexRoutings() { + return indexRoutings; + } + + @Override + public String preference() { + return preference; + } + @Override public void setProfile(boolean profile) { this.profile = profile; @@ -225,6 +235,13 @@ protected void innerReadFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + indexRoutings = in.readStringArray(); + preference = in.readOptionalString(); + } else { + indexRoutings = Strings.EMPTY_ARRAY; + preference = null; + } } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { @@ -240,7 +257,7 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException if (out.getVersion().onOrAfter(Version.V_5_2_0)) { out.writeFloat(indexBoost); } - if (!asKey) { + if (asKey == false) { out.writeVLong(nowInMillis); } out.writeOptionalBoolean(requestCache); @@ -250,7 +267,12 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } - + if (asKey == false) { + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeStringArray(indexRoutings); + out.writeOptionalString(preference); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 19eb0f17ccc84..0a1513e17d08e 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.internal; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.CheckedFunction; @@ -28,8 +30,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import 
org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.AliasFilterParsingException; @@ -68,11 +68,21 @@ public interface ShardSearchRequest { long nowInMillis(); Boolean requestCache(); - + Boolean allowPartialSearchResults(); Scroll scroll(); + /** + * Returns the routing values resolved by the coordinating node for the index pointed to by {@link #shardId()}. + */ + String[] indexRoutings(); + + /** + * Returns the {@link SearchRequest#preference()} of the original search request. + */ + String preference(); + /** * Sets if this shard search needs to be profiled or not * @param profile True if the shard should be profiled diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index ac86d24ed000d..08060a2b249b6 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -28,9 +28,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryRewriteContext; -import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; @@ -57,9 +54,10 @@ public ShardSearchTransportRequest(){ } public ShardSearchTransportRequest(OriginalIndices originalIndices, SearchRequest searchRequest, ShardId shardId, int numberOfShards, - AliasFilter aliasFilter, float indexBoost, long nowInMillis, String clusterAlias) { + AliasFilter aliasFilter, float indexBoost, long nowInMillis, + String clusterAlias, String[] indexRoutings) { this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost, - nowInMillis, clusterAlias); + nowInMillis, clusterAlias, indexRoutings); this.originalIndices = originalIndices; } @@ -151,17 +149,27 @@ public long nowInMillis() { public Boolean requestCache() { return shardSearchLocalRequest.requestCache(); } - + @Override public Boolean allowPartialSearchResults() { return shardSearchLocalRequest.allowPartialSearchResults(); - } + } @Override public Scroll scroll() { return shardSearchLocalRequest.scroll(); } + @Override + public String[] indexRoutings() { + return shardSearchLocalRequest.indexRoutings(); + } + + @Override + public String preference() { + return shardSearchLocalRequest.preference(); + } + @Override public void readFrom(StreamInput in) throws IOException { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index aabf0c3fd0c69..06eba08fd7f22 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -23,6 +23,10 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.Version; +import
org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,6 +34,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,9 +44,13 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.internal.ShardSearchRequest; import java.io.IOException; +import java.util.Collections; +import java.util.Map; import java.util.Objects; +import java.util.Set; /** * A slice builder allowing to split a scroll in multiple partitions. @@ -203,12 +212,49 @@ public int hashCode() { return Objects.hash(this.field, this.id, this.max); } - public Query toFilter(QueryShardContext context, int shardId, int numShards) { + /** + * Converts this slice builder to a Lucene {@link Query} filter. + * + * @param context Additional information needed to build the query + */ + public Query toFilter(ClusterService clusterService, ShardSearchRequest request, QueryShardContext context, Version minNodeVersion) { final MappedFieldType type = context.fieldMapper(field); if (type == null) { throw new IllegalArgumentException("field " + field + " not found"); } + int shardId = request.shardId().id(); + int numShards = context.getIndexSettings().getNumberOfShards(); + if (minNodeVersion.onOrAfter(Version.V_7_0_0_alpha1) && + (request.preference() != null || request.indexRoutings().length > 0)) { + GroupShardsIterator<ShardIterator> group = buildShardIterator(clusterService, request); + assert group.size() <= numShards : "index routing shards: " + group.size() + + " cannot be greater than total number of shards: " + numShards; + if (group.size() < numShards) { + /** + * The routing of this request targets a subset of the shards of this index, so we need to retrieve + * the original {@link GroupShardsIterator} and compute the request shard id and number of + * shards from it. + * This behavior has been added in {@link Version#V_7_0_0_alpha1}, so if there is another node in the cluster + * with an older version we use the original shard id and number of shards in order to ensure that all + * slices use the same numbers. + */ + numShards = group.size(); + int ord = 0; + shardId = -1; + // remap the original shard id with its index (position) in the sorted shard iterator. + for (ShardIterator it : group) { + assert it.shardId().getIndex().equals(request.shardId().getIndex()); + if (request.shardId().equals(it.shardId())) { + shardId = ord; + break; + } + ++ord; + } + assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; + } + } + String field = this.field; boolean useTermQuery = false; if ("_uid".equals(field)) { @@ -273,6 +319,17 @@ public Query toFilter(QueryShardContext context, int shardId, int numShards) { return new MatchAllDocsQuery(); } + /** + * Returns the {@link GroupShardsIterator} for the provided request.
+ */ + private GroupShardsIterator<ShardIterator> buildShardIterator(ClusterService clusterService, ShardSearchRequest request) { + final ClusterState state = clusterService.state(); + String[] indices = new String[] { request.shardId().getIndex().getName() }; + Map<String, Set<String>> routingMap = request.indexRoutings().length > 0 ? + Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null; + return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference()); + } + @Override public String toString() { return Strings.toString(this, true, true); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 6ade2b8781ecf..193878e2f5e04 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.shard.ShardId; @@ -62,10 +63,15 @@ private AbstractSearchAsyncAction<SearchPhaseResult> createAction( final SearchRequest request = new SearchRequest(); request.allowPartialSearchResults(true); + request.preference("_shards:1,3"); return new AbstractSearchAsyncAction<SearchPhaseResult>("test", null, null, null, - Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), null, - request, null, new GroupShardsIterator<>(Collections.singletonList( - new SearchShardIterator(null, null, Collections.emptyList(), null))), timeProvider, 0, null, + Collections.singletonMap("foo", new AliasFilter(new MatchAllQueryBuilder())), Collections.singletonMap("foo", 2.0f), + Collections.singletonMap("name", Sets.newHashSet("bar", "baz")), null, request, null, + new GroupShardsIterator<>( + Collections.singletonList( + new SearchShardIterator(null, null, Collections.emptyList(), null) + ) + ), timeProvider, 0, null, new InitialSearchPhase.ArraySearchPhaseResults<>(10), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY) { @Override @@ -117,5 +123,8 @@ public void testBuildShardSearchTransportRequest() { assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices()); assertEquals(new MatchAllQueryBuilder(), shardSearchTransportRequest.getAliasFilter().getQueryBuilder()); assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); + assertArrayEquals(new String[] {"name", "name1"}, shardSearchTransportRequest.indices()); + assertArrayEquals(new String[] {"bar", "baz"}, shardSearchTransportRequest.indexRoutings()); + assertEquals("_shards:1,3", shardSearchTransportRequest.preference()); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java index d60f29a5d5395..8b1741967734c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -78,12 +78,12 @@ public void sendCanMatch(Transport.Connection
connection, ShardSearchTransportRe 2, randomBoolean(), primaryNode, replicaNode); final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); - + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), - Collections.emptyMap(), EsExecutors.newDirectExecutorService(), + Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, shardsIter, timeProvider, 0, null, (iter) -> new SearchPhase("test") { @Override @@ -159,12 +159,12 @@ public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRe final SearchRequest searchRequest = new SearchRequest(); searchRequest.allowPartialSearchResults(true); - + CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(logger, searchTransportService, (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), - Collections.emptyMap(), EsExecutors.newDirectExecutorService(), + Collections.emptyMap(), Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, shardsIter, timeProvider, 0, null, (iter) -> new SearchPhase("test") { @Override @@ -222,6 +222,7 @@ public void sendCanMatch( (clusterAlias, node) -> lookup.get(node), Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)), Collections.emptyMap(), + Collections.emptyMap(), EsExecutors.newDirectExecutorService(), searchRequest, null, diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index c731d1aaabed0..82e0fcaf5d667 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -106,6 +106,7 @@ public void onFailure(Exception e) { return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), null, request, responseListener, @@ -198,6 +199,7 @@ public void onFailure(Exception e) { return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), null, request, responseListener, @@ -303,6 +305,7 @@ public void sendFreeContext(Transport.Connection connection, long contextId, Ori return lookup.get(node); }, aliasFilters, Collections.emptyMap(), + Collections.emptyMap(), executor, request, responseListener, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 65526896864d6..8a9b00a8d4ff7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { } /** - * puts primary shard routings into initializing state + * puts primary shard routings into initializing state */ private void initPrimaries() { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 055adbaebbce5..349997d7793eb 100644 ---
a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { } /** - * puts primary shard routings into initializing state + * puts primary shard routings into initializing state */ private void initPrimaries() { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index 6e8e679188c66..4ef9c36d9306c 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -122,6 +122,16 @@ public Scroll scroll() { return null; } + @Override + public String[] indexRoutings() { + return null; + } + + @Override + public String preference() { + return null; + } + @Override public void setProfile(boolean profile) { diff --git a/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java index 14ec800c3a65d..6b3b6a67a9783 100644 --- a/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/AliasRoutingIT.java @@ -170,7 +170,7 @@ public void testAliasSearchRouting() throws Exception { assertThat(client().prepareSearch("alias1").setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L)); } - logger.info("--> search with 0,1 routings , should find two"); + logger.info("--> search with 0,1 routings, should find two"); for (int i = 0; i < 5; i++) { assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); diff --git a/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java index 84caed948a2be..0a2a43f2f83d4 100644 --- a/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/SimpleRoutingIT.java @@ -173,13 +173,13 @@ public void testSimpleSearchRouting() { assertThat(client().prepareSearch().setSize(0).setRouting(secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L)); } - logger.info("--> search with {},{} routings , should find two", routingValue, "1"); + logger.info("--> search with {},{} routings, should find two", routingValue, "1"); for (int i = 0; i < 5; i++) { assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); } - logger.info("--> search with {},{},{} routings , should find two", routingValue, secondRoutingValue, routingValue); + logger.info("--> search with {},{},{} routings, should find two", routingValue, secondRoutingValue, routingValue); for (int i = 0; i < 5;
i++) { assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue,routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L)); diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index ec422435e4e07..f59cc85c09ccf 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -112,8 +112,8 @@ public void testPreProcess() throws Exception { IndexReader reader = w.getReader(); Engine.Searcher searcher = new Engine.Searcher("test", new IndexSearcher(reader))) { - DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context1 = new DefaultSearchContext(1L, shardSearchRequest, null, searcher, null, indexService, + indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); context1.from(300); // resultWindow greater than maxResultWindow and scrollContext is null @@ -153,8 +153,8 @@ public void testPreProcess() throws Exception { + "] index level setting.")); // rescore is null but sliceBuilder is not null - DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context2 = new DefaultSearchContext(2L, shardSearchRequest, null, searcher, + null, indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); SliceBuilder sliceBuilder = mock(SliceBuilder.class); int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); @@ -170,8 +170,8 @@ public void testPreProcess() throws Exception { when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); - DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, indexService, - indexShard, bigArrays, null, timeout, null, null); + DefaultSearchContext context3 = new DefaultSearchContext(3L, shardSearchRequest, null, searcher, null, + indexService, indexShard, bigArrays, null, timeout, null, null, Version.CURRENT); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index f5552ee0d2e46..c58a158fc677d 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -213,7 +213,7 @@ public void onFailure(Exception e) { SearchPhaseResult searchPhaseResult = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, - true), + true, null, null), new SearchTask(123L, "", "", "", null, 
Collections.emptyMap())); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); @@ -249,7 +249,7 @@ public void testTimeout() throws IOException { new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, true) + 1.0f, true, null, null) ); try { // the search context should inherit the default timeout @@ -269,7 +269,7 @@ public void testTimeout() throws IOException { new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, true) + 1.0f, true, null, null) ); try { // the search context should inherit the query timeout @@ -297,12 +297,13 @@ public void testMaxDocvalueFieldsSearch() throws IOException { searchSourceBuilder.docValueField("field" + i); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) { assertNotNull(context); searchSourceBuilder.docValueField("one_field_too_much"); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, + true, null, null))); assertEquals( "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. " + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", @@ -328,13 +329,14 @@ public void testMaxScriptFieldsSearch() throws IOException { new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true, null, null))) { assertNotNull(context); searchSourceBuilder.scriptField("anotherScriptField", new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, true))); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, true, null, null))); assertEquals( "Trying to retrieve too many script_fields. 
Must be less than or equal to: [" + maxScriptFields + "] but was [" + (maxScriptFields + 1) @@ -406,28 +408,28 @@ public void testCanMatch() throws IOException { final IndexShard indexShard = indexService.getShard(0); final boolean allowPartialSearchResults = true; assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, null, - Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, - new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, - allowPartialSearchResults))); + new SearchSourceBuilder(), Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, + allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchAllQueryBuilder()), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) .aggregation(new TermsAggregationBuilder("test", ValueType.STRING).minDocCount(0)), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertTrue(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()) .aggregation(new GlobalAggregationBuilder("test")), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); assertFalse(service.canMatch(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.QUERY_THEN_FETCH, new SearchSourceBuilder().query(new MatchNoneQueryBuilder()), Strings.EMPTY_ARRAY, false, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults))); + new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, allowPartialSearchResults, null, null))); } diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index c2016ceb02ce7..21a4f099f5a32 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -74,6 +74,8 @@ public void testSerialization() throws Exception { assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType()); assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId()); assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards()); + assertEquals(deserializedRequest.indexRoutings(), shardSearchTransportRequest.indexRoutings()); + 
assertEquals(deserializedRequest.preference(), shardSearchTransportRequest.preference()); assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey()); assertNotSame(deserializedRequest, shardSearchTransportRequest); assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter()); @@ -92,8 +94,10 @@ private ShardSearchTransportRequest createShardSearchTransportRequest() throws I } else { filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY); } + final String[] routings = generateRandomStringArray(5, 10, false, true); return new ShardSearchTransportRequest(new OriginalIndices(searchRequest), searchRequest, shardId, - randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), Math.abs(randomLong()), null); + randomIntBetween(1, 100), filteringAliases, randomBoolean() ? 1.0f : randomFloat(), + Math.abs(randomLong()), null, routings); } public void testFilteringAliases() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java index 2227cbb806b3f..d609f84e4192e 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.slice; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -48,9 +49,7 @@ import static org.hamcrest.Matchers.startsWith; public class SearchSliceIT extends ESIntegTestCase { - private static final int NUM_DOCS = 1000; - - private int setupIndex(boolean withDocs) throws IOException, ExecutionException, InterruptedException { + private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { String mapping = Strings.toString(XContentFactory.jsonBuilder(). 
startObject() .startObject("type") @@ -70,74 +69,112 @@ private int setupIndex(boolean withDocs) throws IOException, ExecutionException, .endObject() .endObject() .endObject()); - int numberOfShards = randomIntBetween(1, 7); assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000)) .addMapping("type", mapping, XContentType.JSON)); ensureGreen(); - if (withDocs == false) { - return numberOfShards; - } - List requests = new ArrayList<>(); - for (int i = 0; i < NUM_DOCS; i++) { - XContentBuilder builder = jsonBuilder(); - builder.startObject(); - builder.field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20)); - builder.field("random_int", randomInt()); - builder.field("static_int", 0); - builder.field("invalid_random_int", randomInt()); - builder.endObject(); + for (int i = 0; i < numDocs; i++) { + XContentBuilder builder = jsonBuilder() + .startObject() + .field("invalid_random_kw", randomAlphaOfLengthBetween(5, 20)) + .field("random_int", randomInt()) + .field("static_int", 0) + .field("invalid_random_int", randomInt()) + .endObject(); requests.add(client().prepareIndex("test", "type").setSource(builder)); } indexRandom(true, requests); - return numberOfShards; } - public void testDocIdSort() throws Exception { - int numShards = setupIndex(true); - SearchResponse sr = client().prepareSearch("test") - .setQuery(matchAllQuery()) - .setSize(0) - .get(); - int numDocs = (int) sr.getHits().getTotalHits(); - assertThat(numDocs, equalTo(NUM_DOCS)); - int max = randomIntBetween(2, numShards*3); + public void testSearchSort() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); for (String field : new String[]{"_id", "random_int", "static_int"}) { int fetchSize = randomIntBetween(10, 100); + // test _doc sort SearchRequestBuilder request = client().prepareSearch("test") .setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .setSize(fetchSize) .addSort(SortBuilders.fieldSort("_doc")); - assertSearchSlicesWithScroll(request, field, max); + assertSearchSlicesWithScroll(request, field, max, numDocs); + + // test numeric sort + request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .addSort(SortBuilders.fieldSort("random_int")) + .setSize(fetchSize); + assertSearchSlicesWithScroll(request, field, max, numDocs); } } - public void testNumericSort() throws Exception { - int numShards = setupIndex(true); - SearchResponse sr = client().prepareSearch("test") - .setQuery(matchAllQuery()) - .setSize(0) - .get(); - int numDocs = (int) sr.getHits().getTotalHits(); - assertThat(numDocs, equalTo(NUM_DOCS)); - - int max = randomIntBetween(2, numShards*3); - for (String field : new String[]{"_id", "random_int", "static_int"}) { + public void testWithPreferenceAndRoutings() throws Exception { + int numShards = 10; + int totalDocs = randomIntBetween(100, 1000); + setupIndex(totalDocs, numShards); + { + SearchResponse sr = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPreference("_shards:1,4") + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); int fetchSize = randomIntBetween(10, 100); SearchRequestBuilder request = client().prepareSearch("test") .setQuery(matchAllQuery()) 
.setScroll(new Scroll(TimeValue.timeValueSeconds(10))) - .addSort(SortBuilders.fieldSort("random_int")) - .setSize(fetchSize); - assertSearchSlicesWithScroll(request, field, max); + .setSize(fetchSize) + .setPreference("_shards:1,4") + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); + } + { + SearchResponse sr = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setRouting("foo", "bar") + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setSize(fetchSize) + .setRouting("foo", "bar") + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); + } + { + assertAcked(client().admin().indices().prepareAliases() + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias1").routing("foo")) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias2").routing("bar")) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test").alias("alias3").routing("baz")) + .get()); + SearchResponse sr = client().prepareSearch("alias1", "alias3") + .setQuery(matchAllQuery()) + .setSize(0) + .get(); + int numDocs = (int) sr.getHits().getTotalHits(); + int max = randomIntBetween(2, numShards * 3); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("alias1", "alias3") + .setQuery(matchAllQuery()) + .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithScroll(request, "_id", max, numDocs); } } public void testInvalidFields() throws Exception { - setupIndex(false); + setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("test") .setQuery(matchAllQuery()) @@ -161,7 +198,7 @@ public void testInvalidFields() throws Exception { } public void testInvalidQuery() throws Exception { - setupIndex(false); + setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch() .setQuery(matchAllQuery()) @@ -173,7 +210,7 @@ public void testInvalidQuery() throws Exception { equalTo("`slice` cannot be used outside of a scroll context")); } - private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice) { + private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { int totalResults = 0; List keys = new ArrayList<>(); for (int id = 0; id < numSlice; id++) { @@ -184,7 +221,7 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f int numSliceResults = searchResponse.getHits().getHits().length; String scrollId = searchResponse.getScrollId(); for (SearchHit hit : searchResponse.getHits().getHits()) { - keys.add(hit.getId()); + assertTrue(keys.add(hit.getId())); } while (searchResponse.getHits().getHits().length > 0) { searchResponse = client().prepareSearchScroll("test") @@ -195,15 +232,15 @@ private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String f totalResults += searchResponse.getHits().getHits().length; numSliceResults += searchResponse.getHits().getHits().length; for 
(SearchHit hit : searchResponse.getHits().getHits()) { - keys.add(hit.getId()); + assertTrue(keys.add(hit.getId())); } } assertThat(numSliceResults, equalTo(expectedSliceResults)); clearScroll(scrollId); } - assertThat(totalResults, equalTo(NUM_DOCS)); - assertThat(keys.size(), equalTo(NUM_DOCS)); - assertThat(new HashSet(keys).size(), equalTo(NUM_DOCS)); + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet(keys).size(), equalTo(numDocs)); } private Throwable findRootCause(Exception e) { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index 75802e92ee176..d6bcbfa8e6b7d 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -30,19 +30,38 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.elasticsearch.Version; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchShardIterator; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.Rewriteable; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -58,13 +77,138 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class SliceBuilderTests extends ESTestCase { private static final int MAX_SLICE = 20; - private static SliceBuilder randomSliceBuilder() throws IOException { + static class ShardSearchRequestTest implements IndicesRequest, ShardSearchRequest { + private final String[] indices; + private final int shardId; + private final String[] indexRoutings; + private final String preference; + + 
ShardSearchRequestTest(String index, int shardId, String[] indexRoutings, String preference) { + this.indices = new String[] { index }; + this.shardId = shardId; + this.indexRoutings = indexRoutings; + this.preference = preference; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return null; + } + + @Override + public ShardId shardId() { + return new ShardId(new Index(indices[0], indices[0]), shardId); + } + + @Override + public String[] types() { + return new String[0]; + } + + @Override + public SearchSourceBuilder source() { + return null; + } + + @Override + public AliasFilter getAliasFilter() { + return null; + } + + @Override + public void setAliasFilter(AliasFilter filter) { + + } + + @Override + public void source(SearchSourceBuilder source) { + + } + + @Override + public int numberOfShards() { + return 0; + } + + @Override + public SearchType searchType() { + return null; + } + + @Override + public float indexBoost() { + return 0; + } + + @Override + public long nowInMillis() { + return 0; + } + + @Override + public Boolean requestCache() { + return null; + } + + @Override + public Boolean allowPartialSearchResults() { + return null; + } + + @Override + public Scroll scroll() { + return null; + } + + @Override + public String[] indexRoutings() { + return indexRoutings; + } + + @Override + public String preference() { + return preference; + } + + @Override + public void setProfile(boolean profile) { + + } + + @Override + public boolean isProfile() { + return false; + } + + @Override + public BytesReference cacheKey() throws IOException { + return null; + } + + @Override + public String getClusterAlias() { + return null; + } + + @Override + public Rewriteable getRewriteable() { + return null; + } + } + + private static SliceBuilder randomSliceBuilder() { int max = randomIntBetween(2, MAX_SLICE); int id = randomIntBetween(1, max - 1); String field = randomAlphaOfLengthBetween(5, 20); @@ -75,7 +219,7 @@ private static SliceBuilder serializedCopy(SliceBuilder original) throws IOExcep return copyWriteable(original, new NamedWriteableRegistry(Collections.emptyList()), SliceBuilder::new); } - private static SliceBuilder mutate(SliceBuilder original) throws IOException { + private static SliceBuilder mutate(SliceBuilder original) { switch (randomIntBetween(0, 2)) { case 0: return new SliceBuilder(original.getField() + "_xyz", original.getId(), original.getMax()); case 1: return new SliceBuilder(original.getField(), original.getId() - 1, original.getMax()); @@ -84,6 +228,63 @@ private static SliceBuilder mutate(SliceBuilder original) throws IOException { } } + private IndexSettings createIndexSettings(Version indexVersionCreated, int numShards) { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); + return new IndexSettings(indexState, Settings.EMPTY); + } + + private ShardSearchRequest createRequest(int shardId) { + return createRequest(shardId, Strings.EMPTY_ARRAY, null); + } + + private ShardSearchRequest createRequest(int shardId, String[] routings, String preference) { + return new ShardSearchRequestTest("index", shardId, routings, preference); + } + + private QueryShardContext createShardContext(Version indexVersionCreated, IndexReader reader, 
+ String fieldName, DocValuesType dvType, int numShards, int shardId) { + MappedFieldType fieldType = new MappedFieldType() { + @Override + public MappedFieldType clone() { + return null; + } + + @Override + public String typeName() { + return null; + } + + @Override + public Query termQuery(Object value, @Nullable QueryShardContext context) { + return null; + } + + public Query existsQuery(QueryShardContext context) { + return null; + } + }; + fieldType.setName(fieldName); + QueryShardContext context = mock(QueryShardContext.class); + when(context.fieldMapper(fieldName)).thenReturn(fieldType); + when(context.getIndexReader()).thenReturn(reader); + when(context.getShardId()).thenReturn(shardId); + IndexSettings indexSettings = createIndexSettings(indexVersionCreated, numShards); + when(context.getIndexSettings()).thenReturn(indexSettings); + if (dvType != null) { + fieldType.setHasDocValues(true); + fieldType.setDocValuesType(dvType); + IndexNumericFieldData fd = mock(IndexNumericFieldData.class); + when(context.getForField(fieldType)).thenReturn(fd); + } + return context; + + } + public void testSerialization() throws Exception { SliceBuilder original = randomSliceBuilder(); SliceBuilder deserialized = serializedCopy(original); @@ -131,92 +332,41 @@ public void testInvalidArguments() throws Exception { assertEquals("max must be greater than id", e.getMessage()); } - public void testToFilter() throws IOException { + public void testToFilterSimple() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { writer.commit(); } - QueryShardContext context = mock(QueryShardContext.class); try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName(IdFieldMapper.NAME); - fieldType.setHasDocValues(false); - when(context.fieldMapper(IdFieldMapper.NAME)).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); - IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY); - when(context.getIndexSettings()).thenReturn(indexSettings); + QueryShardContext context = + createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED_NUMERIC, 1,0); SliceBuilder builder = new SliceBuilder(5, 10); - Query query = builder.toFilter(context, 0, 1); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); try (IndexReader newReader = DirectoryReader.open(dir)) { when(context.getIndexReader()).thenReturn(newReader); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, 
Version.CURRENT), equalTo(query)); } } + } + public void testToFilterRandom() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("field_doc_values"); - fieldType.setHasDocValues(true); - fieldType.setDocValuesType(DocValuesType.SORTED_NUMERIC); - when(context.fieldMapper("field_doc_values")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - IndexNumericFieldData fd = mock(IndexNumericFieldData.class); - when(context.getForField(fieldType)).thenReturn(fd); - SliceBuilder builder = new SliceBuilder("field_doc_values", 5, 10); - Query query = builder.toFilter(context, 0, 1); + QueryShardContext context = + createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED_NUMERIC, 1,0); + SliceBuilder builder = new SliceBuilder("field", 5, 10); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(DocValuesSliceQuery.class)); - - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); try (IndexReader newReader = DirectoryReader.open(dir)) { when(context.getIndexReader()).thenReturn(newReader); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); } // numSlices > numShards @@ -226,7 +376,8 @@ public Query existsQuery(QueryShardContext context) { for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, j, numShards); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) { AtomicInteger count = numSliceMap.get(j); if (count == null) { @@ -250,12 +401,13 @@ public Query existsQuery(QueryShardContext context) { // numShards > numSlices numShards = randomIntBetween(4, 100); - numSlices = randomIntBetween(2, numShards-1); + numSlices = randomIntBetween(2, numShards - 1); List targetShards = new ArrayList<>(); for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, j, numShards); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (q instanceof MatchNoDocsQuery == false) { assertThat(q, instanceOf(MatchAllDocsQuery.class)); targetShards.add(j); @@ -271,7 +423,7 @@ public Query existsQuery(QueryShardContext context) { for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); - Query q = slice.toFilter(context, 
j, numShards); + Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (i == j) { assertThat(q, instanceOf(MatchAllDocsQuery.class)); } else { @@ -280,85 +432,35 @@ public Query existsQuery(QueryShardContext context) { } } } + } + public void testInvalidField() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("field_without_doc_values"); - when(context.fieldMapper("field_without_doc_values")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - SliceBuilder builder = new SliceBuilder("field_without_doc_values", 5, 10); - IllegalArgumentException exc = - expectThrows(IllegalArgumentException.class, () -> builder.toFilter(context, 0, 1)); + QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", null, 1,0); + SliceBuilder builder = new SliceBuilder("field", 5, 10); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> builder.toFilter(null, createRequest(0), context, Version.CURRENT)); assertThat(exc.getMessage(), containsString("cannot load numeric doc values")); } } - public void testToFilterDeprecationMessage() throws IOException { Directory dir = new RAMDirectory(); try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { writer.commit(); } - QueryShardContext context = mock(QueryShardContext.class); try (IndexReader reader = DirectoryReader.open(dir)) { - MappedFieldType fieldType = new MappedFieldType() { - @Override - public MappedFieldType clone() { - return null; - } - - @Override - public String typeName() { - return null; - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return null; - } - - public Query existsQuery(QueryShardContext context) { - return null; - } - }; - fieldType.setName("_uid"); - fieldType.setHasDocValues(false); - when(context.fieldMapper("_uid")).thenReturn(fieldType); - when(context.getIndexReader()).thenReturn(reader); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build(); - IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY); - when(context.getIndexSettings()).thenReturn(indexSettings); + QueryShardContext context = createShardContext(Version.V_6_3_0, reader, "_uid", null, 1,0); SliceBuilder builder = new SliceBuilder("_uid", 5, 10); - Query query = builder.toFilter(context, 0, 1); + Query query = builder.toFilter(null, createRequest(0), context, Version.CURRENT); assertThat(query, instanceOf(TermsSliceQuery.class)); - assertThat(builder.toFilter(context, 0, 1), equalTo(query)); + assertThat(builder.toFilter(null, createRequest(0), context, Version.CURRENT), equalTo(query)); assertWarnings("Computing slices on the 
[_uid] field is deprecated for 6.x indices, use [_id] instead"); } - } public void testSerializationBackcompat() throws IOException { @@ -375,4 +477,35 @@ public void testSerializationBackcompat() throws IOException { SliceBuilder::new, Version.V_6_3_0); assertEquals(sliceBuilder, copy63); } + + public void testToFilterWithRouting() throws IOException { + Directory dir = new RAMDirectory(); + try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) { + writer.commit(); + } + ClusterService clusterService = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + when(state.metaData()).thenReturn(MetaData.EMPTY_META_DATA); + when(clusterService.state()).thenReturn(state); + OperationRouting routing = mock(OperationRouting.class); + GroupShardsIterator it = new GroupShardsIterator<>( + Collections.singletonList( + new SearchShardIterator(null, new ShardId("index", "index", 1), null, null) + ) + ); + when(routing.searchShards(any(), any(), any(), any())).thenReturn(it); + when(clusterService.operationRouting()).thenReturn(routing); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + try (IndexReader reader = DirectoryReader.open(dir)) { + QueryShardContext context = createShardContext(Version.CURRENT, reader, "field", DocValuesType.SORTED, 5, 0); + SliceBuilder builder = new SliceBuilder("field", 6, 10); + String[] routings = new String[] { "foo" }; + Query query = builder.toFilter(clusterService, createRequest(1, routings, null), context, Version.CURRENT); + assertEquals(new DocValuesSliceQuery("field", 6, 10), query); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.CURRENT); + assertEquals(new DocValuesSliceQuery("field", 6, 10), query); + query = builder.toFilter(clusterService, createRequest(1, Strings.EMPTY_ARRAY, "foo"), context, Version.V_6_2_0); + assertEquals(new DocValuesSliceQuery("field", 1, 2), query); + } + } } From 0b4d2f52256dc3c7e4de97fcc6139ada3cf7a588 Mon Sep 17 00:00:00 2001 From: Saren Currie Date: Thu, 26 Apr 2018 20:45:48 +1200 Subject: [PATCH 02/24] Clarify documentation of scroll_id (#29424) * Clarify documentation of scroll_id The Scroll API may return the same scroll ID for multiple requests due to server side state. This is not clear from the current documentation. * Further clarify scroll ID return behaviour --- docs/reference/search/request/scroll.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index be725aaf362f5..0fd6979ef9568 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -78,9 +78,9 @@ returned with each batch of results. Each call to the `scroll` API returns the next batch of results until there are no more results left to return, ie the `hits` array is empty. -IMPORTANT: The initial search request and each subsequent scroll request -returns a new `_scroll_id` -- only the most recent `_scroll_id` should be -used. +IMPORTANT: The initial search request and each subsequent scroll request each +return a `_scroll_id`, which may change with each request -- only the most +recent `_scroll_id` should be used. NOTE: If the request specifies aggregations, only the initial search response will contain the aggregations results. 
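The amended scroll docs are easy to honour in client code: capture the `_scroll_id` from every response rather than reusing the first one. A minimal sketch against the 6.x transport client (the index name, page size, and the `scrollAll` wrapper are hypothetical; only the scroll-handling pattern matters):

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;

// Sketch only: scroll until the hits are exhausted, refreshing the scroll id
// from every response because the server may hand back a new one.
static void scrollAll(Client client) {
    SearchResponse response = client.prepareSearch("test")        // hypothetical index
            .setQuery(QueryBuilders.matchAllQuery())
            .setScroll(TimeValue.timeValueSeconds(30))
            .setSize(100)
            .get();
    String scrollId = response.getScrollId();
    while (response.getHits().getHits().length > 0) {
        response = client.prepareSearchScroll(scrollId)           // most recent id only
                .setScroll(TimeValue.timeValueSeconds(30))
                .get();
        scrollId = response.getScrollId();                        // refresh after every call
    }
    client.prepareClearScroll().addScrollId(scrollId).get();      // release server-side state
}
```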
From 05ef60135b06d3582c20628a0edd636cf9e59886 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 26 Apr 2018 11:42:23 +0100 Subject: [PATCH 03/24] Reinstate missing documentation (#28781) The documentation for settings index.routing.allocation.enable, index.routing.rebalance.enable and index.gc_deletes was lost in f123a53d7258349a171e47a35f4581899d8fa776. This change reinstates it. --- docs/reference/docs/delete.asciidoc | 13 ++++++++----- docs/reference/index-modules.asciidoc | 21 +++++++++++++++++++++ 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 782a625586b87..49f31eb2d75fb 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -39,11 +39,14 @@ The result of the above delete operation is: [[delete-versioning]] === Versioning -Each document indexed is versioned. When deleting a document, the -`version` can be specified to make sure the relevant document we are -trying to delete is actually being deleted and it has not changed in the -meantime. Every write operation executed on a document, deletes included, -causes its version to be incremented. +Each document indexed is versioned. When deleting a document, the `version` can +be specified to make sure the relevant document we are trying to delete is +actually being deleted and it has not changed in the meantime. Every write +operation executed on a document, deletes included, causes its version to be +incremented. The version number of a deleted document remains available for a +short time after deletion to allow for control of concurrent operations. The +length of time for which a deleted document's version remains available is +determined by the `index.gc_deletes` index setting and defaults to 60 seconds. [float] [[delete-routing]] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 0ab742108b92f..ed0077a629d7c 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -214,6 +214,27 @@ specific index module: The maximum length of regex that can be used in Regexp Query. Defaults to `1000`. + `index.routing.allocation.enable`:: + + Controls shard allocation for this index. It can be set to: + * `all` (default) - Allows shard allocation for all shards. + * `primaries` - Allows shard allocation only for primary shards. + * `new_primaries` - Allows shard allocation only for newly-created primary shards. + * `none` - No shard allocation is allowed. + + `index.routing.rebalance.enable`:: + + Enables shard rebalancing for this index. It can be set to: + * `all` (default) - Allows shard rebalancing for all shards. + * `primaries` - Allows shard rebalancing only for primary shards. + * `replicas` - Allows shard rebalancing only for replica shards. + * `none` - No shard rebalancing is allowed. + + `index.gc_deletes`:: + + The length of time that a <> remains available for <>. + Defaults to `60s`. 
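All three reinstated settings are dynamic, so they can be changed on a live index. A sketch using the same update-settings API that later patches in this series rely on (the index name and values here are illustrative, not part of the patch):

```java
// Sketch only: adjust the documented settings at runtime via the admin client.
client.admin().indices().prepareUpdateSettings("my-index")           // hypothetical index
        .setSettings(Settings.builder()
                .put("index.routing.allocation.enable", "primaries") // allocate primaries only
                .put("index.routing.rebalance.enable", "none")       // disable rebalancing
                .put("index.gc_deletes", "120s"))                    // keep deleted versions longer
        .get();
```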
+ [float] === Settings in other index modules From 752ba2fb4569587e4d1e5355b95ce628027d8799 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 26 Apr 2018 14:06:56 +0200 Subject: [PATCH 04/24] Adjust serialization versions after backport Relates #29533 --- .../search/internal/ShardSearchLocalRequest.java | 4 ++-- .../java/org/elasticsearch/search/slice/SliceBuilder.java | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 52892d4d52eb9..cf656ed3b9cb2 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -235,7 +235,7 @@ protected void innerReadFrom(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { indexRoutings = in.readStringArray(); preference = in.readOptionalString(); } else { @@ -268,7 +268,7 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException out.writeOptionalBoolean(allowPartialSearchResults); } if (asKey == false) { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeStringArray(indexRoutings); out.writeOptionalString(preference); } diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 06eba08fd7f22..7e6945b9d4822 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -225,7 +225,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, int shardId = request.shardId().id(); int numShards = context.getIndexSettings().getNumberOfShards(); - if (minNodeVersion.onOrAfter(Version.V_7_0_0_alpha1) && + if (minNodeVersion.onOrAfter(Version.V_6_4_0) && (request.preference() != null || request.indexRoutings().length > 0)) { GroupShardsIterator group = buildShardIterator(clusterService, request); assert group.size() <= numShards : "index routing shards: " + group.size() + @@ -235,7 +235,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, * The routing of this request targets a subset of the shards of this index so we need to we retrieve * the original {@link GroupShardsIterator} and compute the request shard id and number of * shards from it. - * This behavior has been added in {@link Version#V_7_0_0_alpha1} so if there is another node in the cluster + * This behavior has been added in {@link Version#V_6_4_0} so if there is another node in the cluster * with an older version we use the original shard id and number of shards in order to ensure that all * slices use the same numbers. */ From 16490d7dfa90985f0fabf7f2d14d1d7803f53a9c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 26 Apr 2018 09:28:14 -0400 Subject: [PATCH 05/24] TEST: Update settings should go through cluster state (#29682) Today we update index settings directly via IndexService instead of the cluster state in IndexServiceTests. However, those changes will be lost if there is a cluster state update. 
In general, we should update index settings via client and limit the direct usage in only special tests. This commit replaces direct usages by the updateSettings api of client. Closes #24491 --- .../resources/checkstyle_suppressions.xml | 1 - .../index/IndexServiceTests.java | 45 +++++++++---------- 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 2aa72f0fa7a1c..609a7cf2ea66f 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -535,7 +535,6 @@ - diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 4dc6a859a5c5a..28fa440d96ac2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -21,7 +21,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -131,16 +130,16 @@ public void testRefreshTaskIsUpdated() throws IOException { assertTrue(indexService.getRefreshTask().mustReschedule()); // now disable - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); assertFalse(refreshTask.isScheduled()); assertFalse(indexService.getRefreshTask().mustReschedule()); // set it to 100ms - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "100ms")).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); @@ -150,8 +149,8 @@ public void testRefreshTaskIsUpdated() throws IOException { assertEquals(100, refreshTask.getInterval().millis()); // set it to 200ms - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get(); assertNotSame(refreshTask, indexService.getRefreshTask()); assertTrue(refreshTask.isClosed()); @@ -161,8 +160,8 @@ public void testRefreshTaskIsUpdated() throws IOException { assertEquals(200, refreshTask.getInterval().millis()); // set it to 200ms again - metaData = 
IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "200ms")).get(); assertSame(refreshTask, indexService.getRefreshTask()); assertTrue(indexService.getRefreshTask().mustReschedule()); assertTrue(refreshTask.isScheduled()); @@ -174,7 +173,9 @@ public void testRefreshTaskIsUpdated() throws IOException { } public void testFsyncTaskIsRunning() throws IOException { - IndexService indexService = createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build()); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build(); + IndexService indexService = createIndex("test", settings); IndexService.AsyncTranslogFSync fsyncTask = indexService.getFsyncTask(); assertNotNull(fsyncTask); assertEquals(5000, fsyncTask.getInterval().millis()); @@ -198,12 +199,10 @@ public void testRefreshActuallyWorks() throws Exception { IndexShard shard = indexService.getShard(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); // now disable the refresh - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()) - .settings(Settings.builder().put(indexService.getMetaData().getSettings()) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).build(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get(); // when we update we reschedule the existing task AND fire off an async refresh to make sure we make everything visible // before that this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible - indexService.updateMetaData(metaData); assertTrue(refreshTask.isClosed()); refreshTask = indexService.getRefreshTask(); assertBusy(() -> { @@ -217,10 +216,8 @@ public void testRefreshActuallyWorks() throws Exception { assertFalse(refreshTask.isClosed()); // refresh every millisecond client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); - metaData = IndexMetaData.builder(indexService.getMetaData()) - .settings(Settings.builder().put(indexService.getMetaData().getSettings()) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).build(); - indexService.updateMetaData(metaData); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).get(); assertTrue(refreshTask.isClosed()); assertBusy(() -> { // this one becomes visible due to the force refresh we are running on updateMetaData if the interval changes @@ -303,13 +300,11 @@ public void testAsyncTranslogTrimActuallyWorks() throws Exception { assertTrue(indexService.getRefreshTask().mustReschedule()); client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); client().admin().indices().prepareFlush("test").get(); - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder() - .put(indexService.getMetaData().getSettings()) - 
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) - .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) - .build(); - indexService.updateMetaData(metaData); - + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), -1) + .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), -1)) + .get(); IndexShard shard = indexService.getShard(0); assertBusy(() -> assertThat(shard.estimateTranslogOperationsFromMinSeq(0L), equalTo(0))); } From 67c0cf1dbf5bf5122c467a4b9c7c2936968ac8e5 Mon Sep 17 00:00:00 2001 From: Andy Bristol Date: Thu, 26 Apr 2018 06:58:41 -0700 Subject: [PATCH 06/24] [test] include oss tar in packaging tests (#30155) Add the oss tar distribution to the packaging test plugin. Test the oss tar distribution in the core packaging tests, and the non-oss tar distribution in the x-pack packaging tests. --- .../gradle/vagrant/VagrantTestPlugin.groovy | 18 ++++++++++-------- .../packaging/tests/20_tar_package.bats | 3 ++- .../test/resources/packaging/utils/tar.bash | 6 +++++- .../resources/packaging/tests/10_basic.bats | 1 + 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 4c1decdc3ddf6..7a0b9f96781df 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -37,8 +37,15 @@ class VagrantTestPlugin implements Plugin { 'ubuntu-1404', ] - /** All onboarded archives by default, available for Bats tests even if not used **/ - static List DISTRIBUTION_ARCHIVES = ['tar', 'rpm', 'deb', 'oss-rpm', 'oss-deb'] + /** All distributions to bring into test VM, whether or not they are used **/ + static List DISTRIBUTIONS = [ + 'archives:tar', + 'archives:oss-tar', + 'packages:rpm', + 'packages:oss-rpm', + 'packages:deb', + 'packages:oss-deb' + ] /** Packages onboarded for upgrade tests **/ static List UPGRADE_FROM_ARCHIVES = ['rpm', 'deb'] @@ -117,13 +124,8 @@ class VagrantTestPlugin implements Plugin { upgradeFromVersion = Version.fromString(upgradeFromVersionRaw) } - DISTRIBUTION_ARCHIVES.each { + DISTRIBUTIONS.each { // Adds a dependency for the current version - if (it == 'tar') { - it = 'archives:tar' - } else { - it = "packages:${it}" - } project.dependencies.add(PACKAGING_CONFIGURATION, project.dependencies.project(path: ":distribution:${it}", configuration: 'default')) } diff --git a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats index 3536c2a207ddd..1a3704c33172f 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats @@ -55,7 +55,8 @@ setup() { } @test "[TAR] archive is available" { - count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l) + local version=$(cat version) + count=$(find . 
-type f -name "${PACKAGE_NAME}-${version}.tar.gz" | wc -l) [ "$count" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/utils/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash index 9b4bc76d841c9..4ded1f73514b2 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/tar.bash @@ -35,10 +35,12 @@ install_archive() { export ESHOME=${1:-/tmp/elasticsearch} + local version=$(cat version) + echo "Unpacking tarball to $ESHOME" rm -rf /tmp/untar mkdir -p /tmp/untar - tar -xzpf elasticsearch*.tar.gz -C /tmp/untar + tar -xzpf "${PACKAGE_NAME}-${version}.tar.gz" -C /tmp/untar find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null @@ -79,6 +81,8 @@ export_elasticsearch_paths() { export ESSCRIPTS="$ESCONFIG/scripts" export ESDATA="$ESHOME/data" export ESLOG="$ESHOME/logs" + + export PACKAGE_NAME=${PACKAGE_NAME:-"elasticsearch-oss"} } # Checks that all directories & files are correctly installed diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats b/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats index 796f4509607ff..898fedbff794e 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/10_basic.bats @@ -19,6 +19,7 @@ load $BATS_UTILS/xpack.bash setup() { skip_not_tar_gz export ESHOME=/tmp/elasticsearch + export PACKAGE_NAME="elasticsearch" export_elasticsearch_paths export ESPLUGIN_COMMAND_USER=elasticsearch } From 5785245ae17e4ff6acd135d4524b3ec8cc745fcd Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Thu, 26 Apr 2018 08:19:17 -0700 Subject: [PATCH 07/24] add copyright/scope configuration for intellij to Contributing Guide (#29688) * add copyright/scope configuration for intellij This commit introduces a section discussing the ability to define copyright rules in intellij for inserting the appropriate copyright headers into files across both apache2-Elasticsearch, as well as the commercial code under `x-pack`. Some other re-organization was made to create more sub-structure * update to reflect reviewer comments --- CONTRIBUTING.md | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 69e90473a7f61..03b2674a4cc8c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -107,6 +107,8 @@ We support development in the Eclipse and IntelliJ IDEs. For Eclipse, the minimum version that we support is [Eclipse Oxygen][eclipse] (version 4.7). For IntelliJ, the minimum version that we support is [IntelliJ 2017.2][intellij]. +### Configuring IDEs And Running Tests + Eclipse users can automatically configure their IDE: `./gradlew eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to @@ -144,6 +146,9 @@ For IntelliJ, go to For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to `VM Arguments`. + +### Java Language Formatting Guidelines + Please follow these formatting guidelines: * Java indent is 4 spaces @@ -155,6 +160,33 @@ Please follow these formatting guidelines: * IntelliJ: `Preferences/Settings->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value. * Don't worry too much about import order. 
Try not to change it but don't worry about fighting your IDE to stop it from doing so. +### License Headers + +We require license headers on all Java files. You will notice that all the Java files in +the top-level `x-pack` directory contain a separate license from the rest of the repository. This +directory contains commercial code that is associated with a separate license. It can be helpful +to have the IDE automatically insert the appropriate license header depending on which part of the project +contributions are made to. + +#### IntelliJ: Copyright & Scope Profiles + +To have IntelliJ insert the correct license, it is necessary to create two copyright profiles. +These may potentially be called `apache2` and `commercial`. These can be created in +`Preferences/Settings->Editor->Copyright->Copyright Profiles`. To associate these profiles with +their respective directories, two "Scopes" will need to be created. These can be created in +`Preferences/Settings->Appearance & Behavior->Scopes`. When creating scopes, be sure to choose +the `shared` scope type. Create a scope, `apache2`, with +the associated pattern of `!file[group:x-pack]:*/`. This pattern will exclude all the files contained in +the `x-pack` directory. The other scope, `commercial`, will have the inverse pattern of `file[group:x-pack]:*/`. +The two scopes, together, should account for all the files in the project. To associate the scopes +with their copyright profiles, go into `Preferences/Settings->Editor->Copyright` and use the `+` to add +the associations `apache2/apache2` and `commercial/commercial`. + +Configuring these options in IntelliJ can be quite buggy, so do not be alarmed if you have to open/close +the settings window and/or restart IntelliJ to see your changes take effect. + +### Creating A Distribution + To create a distribution from the source, simply run: ```sh @@ -169,6 +201,8 @@ The archive distributions (tar and zip) can be found under: `./distribution/archives/(tar|zip)/build/distributions/` +### Running The Full Test Suite + Before submitting your changes, run the test suite to make sure that nothing is broken, with: ```sh From b15631ee547185c9156076a9743d942042899d57 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 26 Apr 2018 17:26:37 +0200 Subject: [PATCH 08/24] [Test] Fix RenameProcessorTests.testRenameExistingFieldNullValue() (#29655) This test fails when the new field name already exists in the ingest document.
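The one-line fix in the diff below leans on `ESTestCase.randomValueOtherThanMany`, which keeps drawing from a random supplier until the supplied predicate stops matching. An illustrative use of the pattern (the values are hypothetical, not from the patch):

```java
// Draw candidates until one falls outside the excluded set, so the generated
// name can never collide with a field the document already contains.
Set<String> existing = new HashSet<>(Arrays.asList("foo", "bar"));
String fresh = randomValueOtherThanMany(existing::contains, () -> randomAlphaOfLength(10));
assertFalse(existing.contains(fresh));
```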
--- .../org/elasticsearch/ingest/common/RenameProcessorTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 758e5eb997297..bf35918ad6e24 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -128,7 +128,7 @@ public void testRenameExistingFieldNullValue() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); ingestDocument.setFieldValue(fieldName, null); - String newFieldName = RandomDocumentPicks.randomFieldName(random()); + String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); Processor processor = new RenameProcessor(randomAlphaOfLength(10), fieldName, newFieldName, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(fieldName), equalTo(false)); From 1d81ec15624492397f9c9dcb2499c435d86613d3 Mon Sep 17 00:00:00 2001 From: Chris Earle Date: Thu, 26 Apr 2018 11:32:46 -0400 Subject: [PATCH 09/24] [Monitoring] Remove unhelpful Monitoring tests (#30144) This removes some monitoring tests that have been silenced for a long time. These tests don't really provide any value for the upgrade suite and they just create noise due to their occasional timing-related failures. --- .../UpgradeClusterClientYamlTestSuiteIT.java | 28 ---------- .../test/mixed_cluster/60_monitoring.yml | 48 ----------------- .../test/old_cluster/60_monitoring.yml | 33 ------------ .../test/upgraded_cluster/60_monitoring.yml | 54 ------------------- 4 files changed, 163 deletions(-) delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_monitoring.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_monitoring.yml delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_monitoring.yml diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 168cda6eeda59..c9ad4b3053cbe 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -39,34 +39,6 @@ public void waitForTemplates() throws Exception { XPackRestTestHelper.waitForMlTemplates(client()); } - /** - * Enables an HTTP exporter for monitoring so that we can test the production-level exporter (not the local exporter). - * - * The build.gradle file disables data collection, so the expectation is that any monitoring rest tests will use the - * "_xpack/monitoring/_bulk" endpoint to lazily setup the templates on-demand and fill in data without worrying about - * timing. 
- */ - @Before - public void waitForMonitoring() throws Exception { - final String[] nodes = System.getProperty("tests.rest.cluster").split(","); - final Map settings = new HashMap<>(); - - settings.put("xpack.monitoring.exporters._http.enabled", true); - // only select the last node to avoid getting the "old" node in a mixed cluster - // if we ever randomize the order that the nodes are restarted (or add more nodes), then we need to verify which node we select - settings.put("xpack.monitoring.exporters._http.host", nodes[nodes.length - 1]); - - assertBusy(() -> { - final ClientYamlTestResponse response = - getAdminExecutionContext().callApi("cluster.put_settings", - emptyMap(), - singletonList(singletonMap("transient", settings)), - emptyMap()); - - assertThat(response.evaluate("acknowledged"), is(true)); - }); - } - @Override protected boolean preserveIndicesUponCompletion() { return true; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_monitoring.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_monitoring.yml deleted file mode 100644 index a5711cfd46c67..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_monitoring.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - ---- -"Index monitoring data and search on the mixed cluster": - - skip: - version: "all" - reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948" - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "old_cluster" } } } - - match: { hits.total: 2 } - - - do: - xpack.monitoring.bulk: - system_id: "kibana" - system_api_version: "6" - interval: "123456ms" - type: "mixed_cluster" - body: - - '{"index": {}}' - - '{"field": "value_3"}' - - '{"index": {}}' - - '{"field": "value_4"}' - - '{"index": {}}' - - '{"field": "value_5"}' - - - is_false: errors - - - do: - indices.refresh: {} - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "old_cluster" } } } - - match: { hits.total: 2 } - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "mixed_cluster" } } } - - match: { hits.total: 3 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_monitoring.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_monitoring.yml deleted file mode 100644 index 5185e6b5c2261..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_monitoring.yml +++ /dev/null @@ -1,33 +0,0 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - ---- -"Index monitoring data and search on the old cluster": - - skip: - version: "all" - reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948" - - do: - xpack.monitoring.bulk: - system_id: "kibana" - system_api_version: "6" - interval: "123456ms" - type: "old_cluster" - body: - - '{"index": {}}' - - '{"field": "value_1"}' - - '{"index": {}}' - - '{"field": "value_2"}' - - - is_false: errors - - - do: - indices.refresh: {} - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "old_cluster" } } } - - match: { hits.total: 2 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_monitoring.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_monitoring.yml deleted file mode 100644 index 
5d3501f778387..0000000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_monitoring.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - ---- -"Index monitoring data and search on the upgraded cluster": - - skip: - version: "all" - reason: "AwaitsFix'ing, see x-pack-elasticsearch #2948" - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "old_cluster" } } } - - match: { hits.total: 2 } - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "mixed_cluster" } } } - - match: { hits.total: 3 } - - - do: - xpack.monitoring.bulk: - system_id: "kibana" - system_api_version: "6" - interval: "123456ms" - type: "upgraded_cluster" - body: - - '{"index": {}}' - - '{"field": "value_6"}' - - '{"index": {}}' - - '{"field": "value_7"}' - - '{"index": {}}' - - '{"field": "value_8"}' - - - is_false: errors - - - do: - indices.refresh: {} - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "terms" : { "type": [ "old_cluster", "mixed_cluster" ] } } } - - match: { hits.total: 5 } - - - do: - search: - index: .monitoring-kibana-* - body: { "query": { "term" : { "type": "upgraded_cluster" } } } - - match: { hits.total: 3 } From e864b93abf71a3c513888ed733f375345c838696 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 26 Apr 2018 17:47:16 +0200 Subject: [PATCH 10/24] Fix TermsSetQueryBuilder.doEquals() method (#29629) Closes #29620 --- .../index/query/TermsSetQueryBuilder.java | 14 +++-- .../query/TermsSetQueryBuilderTests.java | 57 ++++++++++++++++--- 2 files changed, 59 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java index b8afe967b05ff..5caabd445b32e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java @@ -18,9 +18,7 @@ */ package org.elasticsearch.index.query; -import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanQuery; @@ -86,6 +84,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(minimumShouldMatchScript); } + // package protected for testing purpose + String getFieldName() { + return fieldName; + } + public List getValues() { return values; } @@ -116,9 +119,10 @@ public TermsSetQueryBuilder setMinimumShouldMatchScript(Script minimumShouldMatc @Override protected boolean doEquals(TermsSetQueryBuilder other) { - return Objects.equals(fieldName, this.fieldName) && Objects.equals(values, this.values) && - Objects.equals(minimumShouldMatchField, this.minimumShouldMatchField) && - Objects.equals(minimumShouldMatchScript, this.minimumShouldMatchScript); + return Objects.equals(fieldName, other.fieldName) + && Objects.equals(values, other.values) + && Objects.equals(minimumShouldMatchField, other.minimumShouldMatchField) + && Objects.equals(minimumShouldMatchScript, other.minimumShouldMatchScript); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java index 
e445eb1411748..b3cd1a361ecde 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TermsSetQueryBuilderTests.java @@ -59,7 +59,9 @@ import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.function.Predicate; +import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -85,17 +87,13 @@ protected TermsSetQueryBuilder doCreateTestQueryBuilder() { do { fieldName = randomFrom(MAPPED_FIELD_NAMES); } while (fieldName.equals(GEO_POINT_FIELD_NAME) || fieldName.equals(GEO_SHAPE_FIELD_NAME)); - int numValues = randomIntBetween(0, 10); - List randomTerms = new ArrayList<>(numValues); - for (int i = 0; i < numValues; i++) { - randomTerms.add(getRandomValueForFieldName(fieldName)); - } + List randomTerms = randomValues(fieldName); TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder(STRING_FIELD_NAME, randomTerms); if (randomBoolean()) { queryBuilder.setMinimumShouldMatchField("m_s_m"); } else { queryBuilder.setMinimumShouldMatchScript( - new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap())); + new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap())); } return queryBuilder; } @@ -122,6 +120,41 @@ protected boolean builderGeneratesCacheableQueries() { return false; } + @Override + public TermsSetQueryBuilder mutateInstance(final TermsSetQueryBuilder instance) throws IOException { + String fieldName = instance.getFieldName(); + List values = instance.getValues(); + String minimumShouldMatchField = null; + Script minimumShouldMatchScript = null; + + switch (randomIntBetween(0, 3)) { + case 0: + Predicate predicate = s -> s.equals(instance.getFieldName()) == false && s.equals(GEO_POINT_FIELD_NAME) == false + && s.equals(GEO_SHAPE_FIELD_NAME) == false; + fieldName = randomValueOtherThanMany(predicate, () -> randomFrom(MAPPED_FIELD_NAMES)); + values = randomValues(fieldName); + break; + case 1: + values = randomValues(fieldName); + break; + case 2: + minimumShouldMatchField = randomAlphaOfLengthBetween(1, 10); + break; + case 3: + minimumShouldMatchScript = new Script(ScriptType.INLINE, MockScriptEngine.NAME, randomAlphaOfLength(10), emptyMap()); + break; + } + + TermsSetQueryBuilder newInstance = new TermsSetQueryBuilder(fieldName, values); + if (minimumShouldMatchField != null) { + newInstance.setMinimumShouldMatchField(minimumShouldMatchField); + } + if (minimumShouldMatchScript != null) { + newInstance.setMinimumShouldMatchScript(minimumShouldMatchScript); + } + return newInstance; + } + public void testBothFieldAndScriptSpecified() { TermsSetQueryBuilder queryBuilder = new TermsSetQueryBuilder("_field", Collections.emptyList()); queryBuilder.setMinimumShouldMatchScript(new Script("")); @@ -215,7 +248,7 @@ public void testDoToQuery_msmScriptField() throws Exception { try (IndexReader ir = DirectoryReader.open(directory)) { QueryShardContext context = createShardContext(); - Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", Collections.emptyMap()); + Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script", emptyMap()); Query query = new TermsSetQueryBuilder("message", Arrays.asList("a", "b", "c", "d")) .setMinimumShouldMatchScript(script).doToQuery(context); IndexSearcher searcher = new IndexSearcher(ir); @@ -228,6 +261,16 
@@ public void testDoToQuery_msmScriptField() throws Exception { } } + private static List randomValues(final String fieldName) { + final int numValues = randomIntBetween(0, 10); + final List values = new ArrayList<>(numValues); + + for (int i = 0; i < numValues; i++) { + values.add(getRandomValueForFieldName(fieldName)); + } + return values; + } + public static class CustomScriptPlugin extends MockScriptPlugin { @Override From eebcdce7f91a170d572c8a38a2e1e7ad09a133dc Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 26 Apr 2018 08:54:29 -0700 Subject: [PATCH 11/24] [DOCS] Updates docker installation package details (#30110) --- x-pack/docs/en/setup/docker.asciidoc | 32 ++++++++++------------------ 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/x-pack/docs/en/setup/docker.asciidoc b/x-pack/docs/en/setup/docker.asciidoc index c245f9f181c07..a6ee49bc9974e 100644 --- a/x-pack/docs/en/setup/docker.asciidoc +++ b/x-pack/docs/en/setup/docker.asciidoc @@ -3,8 +3,7 @@ === Install {es} with Docker {es} is also available as Docker images. -The images use https://hub.docker.com/_/centos/[centos:7] as the base image and -are available with {xpack-ref}/xpack-introduction.html[X-Pack]. +The images use https://hub.docker.com/_/centos/[centos:7] as the base image. A list of all published Docker images and tags can be found in https://www.docker.elastic.co[www.docker.elastic.co]. The source code can be found @@ -12,28 +11,19 @@ on https://github.com/elastic/elasticsearch-docker/tree/{branch}[GitHub]. ==== Image types -The images are available in three different configurations or "flavors". The -`basic` flavor, which is the default, ships with {xpack} Basic features -pre-installed and automatically activated with a free licence. The `platinum` -flavor features all {xpack} functionally under a 30-day trial licence. The `oss` -flavor does not include {xpack}, and contains only open-source {es}. +These images are free to use under the Elastic license. They contain open source +and free commercial features and access to paid commercial features. +{xpack-ref}/license-management.html[Start a 30-day trial] to try out all of the +paid commercial features. See the +https://www.elastic.co/subscriptions[Subscriptions] page for information about +Elastic license levels. -NOTE: {xpack-ref}/xpack-security.html[X-Pack Security] is enabled in the `platinum` -image. To access your cluster, it's necessary to set an initial password for the -`elastic` user. The initial password can be set at start up time via the -`ELASTIC_PASSWORD` environment variable: +Alternatively, you can download `-oss` images, which contain only features that +are available under the Apache 2.0 license. -["source","txt",subs="attributes"] --------------------------------------------- -docker run -e ELASTIC_PASSWORD=MagicWord {docker-repo}-platinum:{version} --------------------------------------------- - -NOTE: The `platinum` image includes a trial license for 30 days. After that, you -can obtain one of the https://www.elastic.co/subscriptions[available -subscriptions] or revert to a Basic licence. The Basic license is free and -includes a selection of {xpack} features. +==== Pulling the image -Obtaining {Es} for Docker is as simple as issuing a +docker pull+ command +Obtaining {es} for Docker is as simple as issuing a +docker pull+ command against the Elastic Docker registry. 
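Stepping back to the doEquals() fix in #29629 above: the bug pattern is easy to miss in review, because Objects.equals(fieldName, this.fieldName) compiles cleanly while comparing a field to itself, so the check is vacuously true and equals() silently ignores that field. A minimal sketch of the failure mode, using a hypothetical builder rather than the actual Elasticsearch class:

    import java.util.Objects;

    // Hypothetical single-field builder illustrating the self-comparison bug.
    class SketchBuilder {
        private final String fieldName;

        SketchBuilder(String fieldName) {
            this.fieldName = fieldName;
        }

        // BUG: 'fieldName' and 'this.fieldName' are the same variable, so this
        // always returns true no matter what 'other' contains.
        boolean brokenEquals(SketchBuilder other) {
            return Objects.equals(fieldName, this.fieldName);
        }

        // FIX: compare against the other instance's state, as the patch does.
        boolean fixedEquals(SketchBuilder other) {
            return Objects.equals(fieldName, other.fieldName);
        }
    }

    // new SketchBuilder("a").brokenEquals(new SketchBuilder("b")) -> true (wrong)
    // new SketchBuilder("a").fixedEquals(new SketchBuilder("b"))  -> false (correct)

This is also why the test change adds a mutateInstance() override: equality tests only catch such regressions when they compare instances that are guaranteed to differ in exactly one property.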
ifeval::["{release-state}"=="unreleased"] From cb7e3ffd75f245ffaef25866ce7d318d46158f0f Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Thu, 26 Apr 2018 11:37:04 -0500 Subject: [PATCH 12/24] Remove licenses missed by the migration (#30128) A few of the old-style licenses were kept around because their comment strings did not start with a space. This caused the license check to not recognize them as licenses, so it skipped them. This commit cleans them up. --- .../mapper/ExpressionRoleMappingTests.java | 17 ----------------- .../expression/UnresolvedAttributeTests.java | 17 ----------------- .../function/UnresolvedFunctionTests.java | 17 ----------------- 3 files changed, 51 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 510540468a464..2ac82c8d7c063 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -3,23 +3,6 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -/* -* ELASTICSEARCH CONFIDENTIAL -* __________________ -* -* [2017] Elasticsearch Incorporated. All Rights Reserved. -* -* NOTICE: All information contained herein is, and remains -* the property of Elasticsearch Incorporated and its suppliers, -* if any. The intellectual and technical concepts contained -* herein are proprietary to Elasticsearch Incorporated -* and its suppliers and may be covered by U.S. and Foreign Patents, -* patents in process, and are protected by trade secret or copyright law. -* Dissemination of this information or reproduction of this material -* is strictly forbidden unless prior written permission is obtained -* from Elasticsearch Incorporated. -*/ - package org.elasticsearch.xpack.security.authc.support.mapper; import org.elasticsearch.common.ParsingException; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java index 4eb5d15741b9d..51a16c14a8889 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/UnresolvedAttributeTests.java @@ -3,23 +3,6 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -/* -* ELASTICSEARCH CONFIDENTIAL -* __________________ -* -* [2017] Elasticsearch Incorporated. All Rights Reserved. -* -* NOTICE: All information contained herein is, and remains -* the property of Elasticsearch Incorporated and its suppliers, -* if any. The intellectual and technical concepts contained -* herein are proprietary to Elasticsearch Incorporated -* and its suppliers and may be covered by U.S. and Foreign Patents, -* patents in process, and are protected by trade secret or copyright law. -* Dissemination of this information or reproduction of this material -* is strictly forbidden unless prior written permission is obtained -* from Elasticsearch Incorporated.
-*/ - package org.elasticsearch.xpack.sql.expression; import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java index b050d37062542..1e58a1a5dc277 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/UnresolvedFunctionTests.java @@ -3,23 +3,6 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -/* -* ELASTICSEARCH CONFIDENTIAL -* __________________ -* -* [2017] Elasticsearch Incorporated. All Rights Reserved. -* -* NOTICE: All information contained herein is, and remains -* the property of Elasticsearch Incorporated and its suppliers, -* if any. The intellectual and technical concepts contained -* herein are proprietary to Elasticsearch Incorporated -* and its suppliers and may be covered by U.S. and Foreign Patents, -* patents in process, and are protected by trade secret or copyright law. -* Dissemination of this information or reproduction of this material -* is strictly forbidden unless prior written permission is obtained -* from Elasticsearch Incorporated. -*/ - package org.elasticsearch.xpack.sql.expression.function; import org.elasticsearch.xpack.sql.expression.Expression; From d40116d260c44214b9023c14e7972d861da74dab Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 26 Apr 2018 09:50:37 -0700 Subject: [PATCH 13/24] Add support for field capabilities to the high-level REST client. 
(#29664) --- .../org/elasticsearch/client/Request.java | 19 +++ .../client/RestHighLevelClient.java | 27 +++ .../elasticsearch/client/RequestTests.java | 49 +++++- .../org/elasticsearch/client/SearchIT.java | 75 ++++++++- .../documentation/SearchDocumentationIT.java | 98 ++++++++++- .../high-level/search/field-caps.asciidoc | 82 +++++++++ .../high-level/supported-apis.asciidoc | 2 + .../action/fieldcaps/FieldCapabilities.java | 53 +++++- .../fieldcaps/FieldCapabilitiesRequest.java | 8 +- .../fieldcaps/FieldCapabilitiesResponse.java | 43 ++++- .../FieldCapabilitiesRequestTests.java | 11 +- .../FieldCapabilitiesResponseTests.java | 158 +++++++++++++++--- .../fieldcaps/FieldCapabilitiesTests.java | 18 +- 13 files changed, 592 insertions(+), 51 deletions(-) create mode 100644 docs/java-rest/high-level/search/field-caps.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 500130ed39705..d68d3b309af51 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -48,6 +48,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -75,6 +76,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -536,6 +538,16 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); } + static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) { + Params params = Params.builder(); + params.withFields(fieldCapabilitiesRequest.fields()); + params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); + + String[] indices = fieldCapabilitiesRequest.indices(); + String endpoint = endpoint(indices, "_field_caps"); + return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null); + } + static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); Params params = Params.builder(); @@ -712,6 +724,13 @@ Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { return this; } + Params withFields(String[] fields) { + if (fields != null && fields.length > 0) { + return putParam("fields", String.join(",", fields)); + } + return this; + } + Params withMasterTimeout(TimeValue masterTimeout) { return putParam("master_timeout", masterTimeout); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index bf80aa7720741..c6d5e947f2c62 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -30,6 +30,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetRequest; @@ -501,6 +503,31 @@ public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener< headers); } + /** + * Executes a request using the Field Capabilities API. + * + * See Field Capabilities API + * on elastic.co. + */ + public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, + Header... headers) throws IOException { + return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + FieldCapabilitiesResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a request using the Field Capabilities API. + * + * See Field Capabilities API + * on elastic.co. + */ + public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, + ActionListener listener, + Header... headers) { + performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps, + FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); + } + protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index f691c60daa5da..0fdeb7555a04a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -89,6 +90,7 @@ import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; @@ -108,11 +110,14 @@ import java.lang.reflect.Constructor; import java.lang.reflect.Modifier; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.StringJoiner; import java.util.function.Consumer; import java.util.function.Function; @@ -128,6 +133,8 @@ import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static 
org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; public class RequestTests extends ESTestCase { @@ -1213,6 +1220,47 @@ public void testExistsAliasNoAliasNoIndex() { } } + public void testFieldCaps() { + // Create a random request. + String[] indices = randomIndicesNames(0, 5); + String[] fields = generateRandomStringArray(5, 10, false, false); + + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() + .indices(indices) + .fields(fields); + + Map indicesOptionsParams = new HashMap<>(); + setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, + fieldCapabilitiesRequest::indicesOptions, + indicesOptionsParams); + + Request request = Request.fieldCaps(fieldCapabilitiesRequest); + + // Verify that the resulting REST request looks as expected. + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String joinedIndices = String.join(",", indices); + if (!joinedIndices.isEmpty()) { + endpoint.add(joinedIndices); + } + endpoint.add("_field_caps"); + + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(4, request.getParameters().size()); + + // Note that we don't check the field param value explicitly, as field names are passed through + // a hash set before being added to the request, and can appear in a non-deterministic order. + assertThat(request.getParameters(), hasKey("fields")); + String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields")); + assertEquals(new HashSet<>(Arrays.asList(fields)), + new HashSet<>(Arrays.asList(requestFields))); + + for (Map.Entry param : indicesOptionsParams.entrySet()) { + assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue())); + } + + assertNull(request.getEntity()); + } + public void testRankEval() throws Exception { RankEvalSpec spec = new RankEvalSpec( Collections.singletonList(new RatedRequest("queryId", Collections.emptyList(), new SearchSourceBuilder())), @@ -1233,7 +1281,6 @@ public void testRankEval() throws Exception { assertEquals(3, request.getParameters().size()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(spec, request.getEntity()); - } public void testSplit() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 01ef0598100fb..9828041332b32 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -27,6 +27,9 @@ import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.MultiSearchRequest; @@ -96,14 +99,31 @@ public void indexDocuments() throws IOException { client().performRequest(HttpPut.METHOD_NAME, "/index/type/5", Collections.emptyMap(), doc5); client().performRequest(HttpPost.METHOD_NAME, "/index/_refresh"); - StringEntity doc = new 
StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); + + StringEntity doc = new StringEntity("{\"field\":\"value1\", \"rating\": 7}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/1", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index1/doc/2", Collections.emptyMap(), doc); - doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); + + StringEntity mappings = new StringEntity( + "{" + + " \"mappings\": {" + + " \"doc\": {" + + " \"properties\": {" + + " \"rating\": {" + + " \"type\": \"keyword\"" + + " }" + + " }" + + " }" + + " }" + + "}}", + ContentType.APPLICATION_JSON); + client().performRequest("PUT", "/index2", Collections.emptyMap(), mappings); + doc = new StringEntity("{\"field\":\"value1\", \"rating\": \"good\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/3", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index2/doc/4", Collections.emptyMap(), doc); + doc = new StringEntity("{\"field\":\"value1\"}", ContentType.APPLICATION_JSON); client().performRequest(HttpPut.METHOD_NAME, "/index3/doc/5", Collections.emptyMap(), doc); doc = new StringEntity("{\"field\":\"value2\"}", ContentType.APPLICATION_JSON); @@ -713,6 +733,57 @@ public void testMultiSearch_failure() throws Exception { assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue()); } + public void testFieldCaps() throws IOException { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index1", "index2") + .fields("rating", "field"); + + FieldCapabilitiesResponse response = execute(request, + highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync); + + // Check the capabilities for the 'rating' field. + assertTrue(response.get().containsKey("rating")); + Map ratingResponse = response.getField("rating"); + assertEquals(2, ratingResponse.size()); + + FieldCapabilities expectedKeywordCapabilities = new FieldCapabilities( + "rating", "keyword", true, true, new String[]{"index2"}, null, null); + assertEquals(expectedKeywordCapabilities, ratingResponse.get("keyword")); + + FieldCapabilities expectedLongCapabilities = new FieldCapabilities( + "rating", "long", true, true, new String[]{"index1"}, null, null); + assertEquals(expectedLongCapabilities, ratingResponse.get("long")); + + // Check the capabilities for the 'field' field. 
+ assertTrue(response.get().containsKey("field")); + Map fieldResponse = response.getField("field"); + assertEquals(1, fieldResponse.size()); + + FieldCapabilities expectedTextCapabilities = new FieldCapabilities( + "field", "text", true, false); + assertEquals(expectedTextCapabilities, fieldResponse.get("text")); + } + + public void testFieldCapsWithNonExistentFields() throws IOException { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index2") + .fields("nonexistent"); + + FieldCapabilitiesResponse response = execute(request, + highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync); + assertTrue(response.get().isEmpty()); + } + + public void testFieldCapsWithNonExistentIndices() { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("non-existent") + .fields("rating"); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(request, highLevelClient()::fieldCaps, highLevelClient()::fieldCapsAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + private static void assertSearchHeader(SearchResponse searchResponse) { assertThat(searchResponse.getTook().nanos(), greaterThanOrEqualTo(0L)); assertEquals(0, searchResponse.getFailedShards()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 52f6984e65107..4400d05a9f820 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -21,8 +21,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollRequest; @@ -93,6 +98,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -157,6 +164,7 @@ public void testSearch() throws Exception { // tag::search-source-setter SearchRequest searchRequest = new SearchRequest(); + searchRequest.indices("posts"); searchRequest.source(sourceBuilder); // end::search-source-setter @@ -699,6 +707,65 @@ public void onFailure(Exception e) { } } + public void testFieldCaps() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + // tag::field-caps-request + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .fields("user") + .indices("posts", "authors", "contributors"); + // end::field-caps-request + + // tag::field-caps-request-indicesOptions + 
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::field-caps-request-indicesOptions + + // tag::field-caps-execute + FieldCapabilitiesResponse response = client.fieldCaps(request); + // end::field-caps-execute + + // tag::field-caps-response + assertThat(response.get().keySet(), contains("user")); + Map userResponse = response.getField("user"); + + assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); // <1> + FieldCapabilities textCapabilities = userResponse.get("keyword"); + + assertTrue(textCapabilities.isSearchable()); + assertFalse(textCapabilities.isAggregatable()); + + assertArrayEquals(textCapabilities.indices(), // <2> + new String[]{"authors", "contributors"}); + assertNull(textCapabilities.nonSearchableIndices()); // <3> + assertArrayEquals(textCapabilities.nonAggregatableIndices(), // <4> + new String[]{"authors"}); + // end::field-caps-response + + // tag::field-caps-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(FieldCapabilitiesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::field-caps-execute-listener + + // Replace the empty listener by a blocking listener for tests. + CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::field-caps-execute-async + client.fieldCapsAsync(request, listener); // <1> + // end::field-caps-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testRankEval() throws Exception { indexSearchTestData(); RestHighLevelClient client = highLevelClient(); @@ -794,7 +861,7 @@ public void testMultiSearch() throws Exception { MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> assertNull(firstResponse.getFailure()); // <2> SearchResponse searchResponse = firstResponse.getResponse(); // <3> - assertEquals(3, searchResponse.getHits().getTotalHits()); + assertEquals(4, searchResponse.getHits().getTotalHits()); MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4> assertNull(secondResponse.getFailure()); searchResponse = secondResponse.getResponse(); @@ -840,18 +907,35 @@ public void onFailure(Exception e) { } private void indexSearchTestData() throws IOException { - BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts", "doc", "1") + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors") + .mapping("doc", "user", "type=keyword,doc_values=false"); + CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest); + assertTrue(authorsResponse.isAcknowledged()); + + CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors") + .mapping("doc", "user", "type=keyword"); + CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest); + assertTrue(reviewersResponse.isAcknowledged()); + + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("posts", "doc", "1") .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "2") + bulkRequest.add(new IndexRequest("posts", "doc", "2") .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", Arrays.asList("kimchy", "christoph"), "innerObject", 
Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "3") + bulkRequest.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = highLevelClient().bulk(request); + + bulkRequest.add(new IndexRequest("authors", "doc", "1") + .source(XContentType.JSON, "user", "kimchy")); + bulkRequest.add(new IndexRequest("contributors", "doc", "1") + .source(XContentType.JSON, "user", "tanguy")); + + + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } diff --git a/docs/java-rest/high-level/search/field-caps.asciidoc b/docs/java-rest/high-level/search/field-caps.asciidoc new file mode 100644 index 0000000000000..fef30f629ca61 --- /dev/null +++ b/docs/java-rest/high-level/search/field-caps.asciidoc @@ -0,0 +1,82 @@ +[[java-rest-high-field-caps]] +=== Field Capabilities API + +The field capabilities API allows for retrieving the capabilities of fields across multiple indices. + +[[java-rest-high-field-caps-request]] +==== Field Capabilities Request + +A `FieldCapabilitiesRequest` contains a list of fields to get capabilities for, +should be returned, plus an optional list of target indices. If no indices +are provided, the request will be executed on all indices. + +Note that fields parameter supports wildcard notation. For example, providing `text_*` +will cause all fields that match the expression to be returned. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request] +-------------------------------------------------- + +[[java-rest-high-field-caps-request-optional]] +===== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded. + +[[java-rest-high-field-caps-sync]] +==== Synchronous Execution + +The `fieldCaps` method executes the request synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute] +-------------------------------------------------- + +[[java-rest-high-field-caps-async]] +==== Asynchronous Execution + +The `fieldCapsAsync` method executes the request asynchronously, +calling the provided `ActionListener` when the response is ready: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-async] +-------------------------------------------------- +<1> The `FieldCapabilitiesRequest` to execute and the `ActionListener` to use when +the execution completes. + +The asynchronous method does not block and returns immediately. 
Once the request +completes, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `FieldCapabilitiesResponse` is constructed as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `FieldCapabilitiesRequest` fails. + +[[java-rest-high-field-caps-response]] +==== FieldCapabilitiesResponse + +For each requested field, the returned `FieldCapabilitiesResponse` contains its type +and whether or not it can be searched or aggregated on. The response also gives +information about how each index contributes to the field's capabilities. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-response] +-------------------------------------------------- +<1> The `user` field has two possible types, `keyword` and `text`. +<2> This field only has type `keyword` in the `authors` and `contributors` indices. +<3> Null, since the field is searchable in all indices for which it has the `keyword` type. +<4> The `user` field is not aggregatable in the `authors` index. \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 1f3d7a3744300..1c0e09c6c079e 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -32,11 +32,13 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> * <> include::search/search.asciidoc[] include::search/scroll.asciidoc[] include::search/multi-search.asciidoc[] +include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] == Miscellaneous APIs diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index ec6d0902ac98a..21bb452430e7a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -19,11 +19,14 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -36,6 +39,13 @@ * Describes the capabilities of a field optionally merged across multiple indices. 
*/ public class FieldCapabilities implements Writeable, ToXContentObject { + private static final ParseField TYPE_FIELD = new ParseField("type"); + private static final ParseField SEARCHABLE_FIELD = new ParseField("searchable"); + private static final ParseField AGGREGATABLE_FIELD = new ParseField("aggregatable"); + private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField NON_SEARCHABLE_INDICES_FIELD = new ParseField("non_searchable_indices"); + private static final ParseField NON_AGGREGATABLE_INDICES_FIELD = new ParseField("non_aggregatable_indices"); + private final String name; private final String type; private final boolean isSearchable; @@ -52,7 +62,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { * @param isSearchable Whether this field is indexed for search. * @param isAggregatable Whether this field can be aggregated on. */ - FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { + public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { this(name, type, isSearchable, isAggregatable, null, null, null); } @@ -69,7 +79,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { * @param nonAggregatableIndices The list of indices where this field is not aggregatable, * or null if the field is aggregatable in all indices. */ - FieldCapabilities(String name, String type, + public FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable, String[] indices, String[] nonSearchableIndices, @@ -83,7 +93,7 @@ public class FieldCapabilities implements Writeable, ToXContentObject { this.nonAggregatableIndices = nonAggregatableIndices; } - FieldCapabilities(StreamInput in) throws IOException { + public FieldCapabilities(StreamInput in) throws IOException { this.name = in.readString(); this.type = in.readString(); this.isSearchable = in.readBoolean(); @@ -107,22 +117,47 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("type", type); - builder.field("searchable", isSearchable); - builder.field("aggregatable", isAggregatable); + builder.field(TYPE_FIELD.getPreferredName(), type); + builder.field(SEARCHABLE_FIELD.getPreferredName(), isSearchable); + builder.field(AGGREGATABLE_FIELD.getPreferredName(), isAggregatable); if (indices != null) { - builder.field("indices", indices); + builder.field(INDICES_FIELD.getPreferredName(), indices); } if (nonSearchableIndices != null) { - builder.field("non_searchable_indices", nonSearchableIndices); + builder.field(NON_SEARCHABLE_INDICES_FIELD.getPreferredName(), nonSearchableIndices); } if (nonAggregatableIndices != null) { - builder.field("non_aggregatable_indices", nonAggregatableIndices); + builder.field(NON_AGGREGATABLE_INDICES_FIELD.getPreferredName(), nonAggregatableIndices); } builder.endObject(); return builder; } + public static FieldCapabilities fromXContent(String name, XContentParser parser) throws IOException { + return PARSER.parse(parser, name); + } + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "field_capabilities", + true, + (a, name) -> new FieldCapabilities(name, + (String) a[0], + (boolean) a[1], + (boolean) a[2], + a[3] != null ? ((List) a[3]).toArray(new String[0]) : null, + a[4] != null ? 
((List) a[4]).toArray(new String[0]) : null, + a[5] != null ? ((List) a[5]).toArray(new String[0]) : null)); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD); + } + /** * The name of the field. */ diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index b04f882076326..264fa21cf9188 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -61,14 +61,18 @@ public FieldCapabilitiesRequest() {} /** * Returns true iff the results should be merged. + * + * Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ boolean isMergeResults() { return mergeResults; } /** - * if set to true the response will contain only a merged view of the per index field capabilities. Otherwise only - * unmerged per index field capabilities are returned. + * If set to true the response will contain only a merged view of the per index field capabilities. + * Otherwise only unmerged per index field capabilities are returned. + * + * Note that when using the high-level REST client, results are always merged (this flag is always considered 'true'). */ void setMergeResults(boolean mergeResults) { this.mergeResults = mergeResults; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 4b1bcf575899f..5e2202ac073af 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -21,20 +21,29 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; /** * Response for {@link FieldCapabilitiesRequest} requests. 
*/ public class FieldCapabilitiesResponse extends ActionResponse implements ToXContentFragment { + private static final ParseField FIELDS_FIELD = new ParseField("fields"); + private Map> responseMap; private List indexResponses; @@ -114,10 +123,42 @@ private static void writeField(StreamOutput out, @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("fields", responseMap); + builder.field(FIELDS_FIELD.getPreferredName(), responseMap); return builder; } + public static FieldCapabilitiesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("field_capabilities_response", true, + a -> new FieldCapabilitiesResponse( + ((List>>) a[0]).stream() + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)))); + + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> { + Map typeToCapabilities = parseTypeToCapabilities(p, n); + return new Tuple<>(n, typeToCapabilities); + }, FIELDS_FIELD); + } + + private static Map parseTypeToCapabilities(XContentParser parser, String name) throws IOException { + Map typeToCapabilities = new HashMap<>(); + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); + String type = parser.currentName(); + FieldCapabilities capabilities = FieldCapabilities.fromXContent(name, parser); + typeToCapabilities.put(type, capabilities); + } + return typeToCapabilities; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 8543b35569a31..9f893ada9c735 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -19,7 +19,9 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; @@ -80,7 +82,7 @@ public void testEqualsAndHashcode() { } - public void testFieldCapsRequestSerialization() throws IOException { + public void testSerialization() throws IOException { for (int i = 0; i < 20; i++) { FieldCapabilitiesRequest request = randomRequest(); BytesStreamOutput output = new BytesStreamOutput(); @@ -93,4 +95,11 @@ public void testFieldCapsRequestSerialization() throws IOException { assertEquals(deserialized.hashCode(), request.hashCode()); } } + + public void testValidation() { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() + .indices("index2"); + ActionRequestValidationException exception = request.validate(); + assertNotNull(exception); + } } diff --git 
a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index 2eaf1d4832f3f..c8bd5d5188b67 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -19,42 +19,152 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; -public class FieldCapabilitiesResponseTests extends ESTestCase { - private FieldCapabilitiesResponse randomResponse() { - Map > fieldMap = new HashMap<> (); - int numFields = randomInt(10); - for (int i = 0; i < numFields; i++) { - String fieldName = randomAlphaOfLengthBetween(5, 10); - int numIndices = randomIntBetween(1, 5); - Map indexFieldMap = new HashMap<> (); - for (int j = 0; j < numIndices; j++) { - String index = randomAlphaOfLengthBetween(10, 20); - indexFieldMap.put(index, FieldCapabilitiesTests.randomFieldCaps()); +import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; + +public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected FieldCapabilitiesResponse doParseInstance(XContentParser parser) throws IOException { + return FieldCapabilitiesResponse.fromXContent(parser); + } + + @Override + protected FieldCapabilitiesResponse createBlankInstance() { + return new FieldCapabilitiesResponse(); + } + + @Override + protected FieldCapabilitiesResponse createTestInstance() { + Map> responses = new HashMap<>(); + + String[] fields = generateRandomStringArray(5, 10, false, true); + assertNotNull(fields); + + for (String field : fields) { + Map typesToCapabilities = new HashMap<>(); + String[] types = generateRandomStringArray(5, 10, false, false); + assertNotNull(types); + + for (String type : types) { + typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); } - fieldMap.put(fieldName, indexFieldMap); + responses.put(field, typesToCapabilities); } - return new FieldCapabilitiesResponse(fieldMap); + return new FieldCapabilitiesResponse(responses); } - public void testSerialization() throws IOException { - for (int i = 0; i < 20; i++) { - FieldCapabilitiesResponse response = randomResponse(); - BytesStreamOutput output = new BytesStreamOutput(); - response.writeTo(output); - output.flush(); - StreamInput input = output.bytes().streamInput(); - FieldCapabilitiesResponse deserialized = new FieldCapabilitiesResponse(); - deserialized.readFrom(input); - assertEquals(deserialized, response); - assertEquals(deserialized.hashCode(), response.hashCode()); + @Override + protected FieldCapabilitiesResponse 
mutateInstance(FieldCapabilitiesResponse response) { + Map> mutatedResponses = new HashMap<>(response.get()); + + int mutation = response.get().isEmpty() ? 0 : randomIntBetween(0, 2); + + switch (mutation) { + case 0: + String toAdd = randomAlphaOfLength(10); + mutatedResponses.put(toAdd, Collections.singletonMap( + randomAlphaOfLength(10), + FieldCapabilitiesTests.randomFieldCaps(toAdd))); + break; + case 1: + String toRemove = randomFrom(mutatedResponses.keySet()); + mutatedResponses.remove(toRemove); + break; + case 2: + String toReplace = randomFrom(mutatedResponses.keySet()); + mutatedResponses.put(toReplace, Collections.singletonMap( + randomAlphaOfLength(10), + FieldCapabilitiesTests.randomFieldCaps(toReplace))); + break; } + return new FieldCapabilitiesResponse(mutatedResponses); + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // Disallow random fields from being inserted under the 'fields' key, as this + // map only contains field names, and also under 'fields.FIELD_NAME', as these + // maps only contain type names. + return field -> field.matches("fields(\\.\\w+)?"); + } + + public void testToXContent() throws IOException { + FieldCapabilitiesResponse response = createSimpleResponse(); + + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON) + .startObject(); + response.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String generatedResponse = BytesReference.bytes(builder).utf8ToString(); + assertEquals(( + "{" + + " \"fields\": {" + + " \"rating\": { " + + " \"keyword\": {" + + " \"type\": \"keyword\"," + + " \"searchable\": false," + + " \"aggregatable\": true," + + " \"indices\": [\"index3\", \"index4\"]," + + " \"non_searchable_indices\": [\"index4\"] " + + " }," + + " \"long\": {" + + " \"type\": \"long\"," + + " \"searchable\": true," + + " \"aggregatable\": false," + + " \"indices\": [\"index1\", \"index2\"]," + + " \"non_aggregatable_indices\": [\"index1\"] " + + " }" + + " }," + + " \"title\": { " + + " \"text\": {" + + " \"type\": \"text\"," + + " \"searchable\": true," + + " \"aggregatable\": false" + + " }" + + " }" + + " }" + + "}").replaceAll("\\s+", ""), generatedResponse); + } + + private static FieldCapabilitiesResponse createSimpleResponse() { + Map titleCapabilities = new HashMap<>(); + titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false)); + + Map ratingCapabilities = new HashMap<>(); + ratingCapabilities.put("long", new FieldCapabilities("rating", "long", + true, false, + new String[]{"index1", "index2"}, + null, + new String[]{"index1"})); + ratingCapabilities.put("keyword", new FieldCapabilities("rating", "keyword", + false, true, + new String[]{"index3", "index4"}, + new String[]{"index4"}, + null)); + + Map> responses = new HashMap<>(); + responses.put("title", titleCapabilities); + responses.put("rating", ratingCapabilities); + return new FieldCapabilitiesResponse(responses); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java index 53c27645bf298..0237ace962a80 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java @@ -20,16 +20,26 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; 
+import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import java.io.IOException; import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; -public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase<FieldCapabilities> { +public class FieldCapabilitiesTests extends AbstractSerializingTestCase<FieldCapabilities> { + private static final String FIELD_NAME = "field"; + + @Override + protected FieldCapabilities doParseInstance(XContentParser parser) throws IOException { + return FieldCapabilities.fromXContent(FIELD_NAME, parser); + } + @Override protected FieldCapabilities createTestInstance() { - return randomFieldCaps(); + return randomFieldCaps(FIELD_NAME); } @Override @@ -82,7 +92,7 @@ public void testBuilder() { } } - static FieldCapabilities randomFieldCaps() { + static FieldCapabilities randomFieldCaps(String fieldName) { String[] indices = null; if (randomBoolean()) { indices = new String[randomIntBetween(1, 5)]; @@ -104,7 +114,7 @@ static FieldCapabilities randomFieldCaps() { nonAggregatableIndices[i] = randomAlphaOfLengthBetween(5, 20); } } - return new FieldCapabilities(randomAlphaOfLengthBetween(5, 20), + return new FieldCapabilities(fieldName, randomAlphaOfLengthBetween(5, 20), randomBoolean(), randomBoolean(), indices, nonSearchableIndices, nonAggregatableIndices); } From 21dbf9fab01b2018de2a13eabd981e4b7cb917f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 26 Apr 2018 19:44:21 +0200 Subject: [PATCH 14/24] Document time unit limitations for date histograms (#30177) Adding some allowed abbreviated values for intervals in date histograms as well as documenting the limitations of intervals larger than days. Closes #23294 --- .../aggregations/bucket/datehistogram-aggregation.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 30ea2832a700e..c2d1614ad6e56 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -27,11 +27,13 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second` +Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`), +`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`) Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing. Note that fractional time values are not supported, but you can address this by shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). +time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger +than days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not). [source,js] -------------------------------------------------- From d0416c76fe490f8094f5b163e0b3691c1060a59d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Thu, 26 Apr 2018 20:06:47 +0200 Subject: [PATCH 15/24] Fix edge cases in CompositeKeyExtractorTests (#30175) Currently the test picks random java.util.TimeZone ids in some places. Internally we still need to convert back to joda DateTimeZone by id occasionally (e.g. when serializing to pre 6.3 versions).
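For illustration, the id-based conversion that can fail looks roughly like this; a minimal sketch, assuming Joda-Time on the classpath (DateTimeZone.forID is the real Joda API, and the deprecated zone id below is just one example):

    import org.joda.time.DateTimeZone;

    public class SystemVZoneExample {
        public static void main(String[] args) {
            // "SystemV/AST4" is a deprecated JDK zone id with no mapping in Joda's
            // tz database, so converting by id throws IllegalArgumentException.
            DateTimeZone jodaZone = DateTimeZone.forID("SystemV/AST4"); // throws
            System.out.println(jodaZone);
        }
    }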
There are some deprecated "SystemV/*" time zones that Joda's DateTimeZone refuses to convert. This change excludes those rare cases from the set of allowed random time zones. It would be quite odd for them to appear in practice. Closes #30156 --- .../extractor/CompositeKeyExtractorTests.java | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java index b7829a246e028..11068372bcc8a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/CompositeKeyExtractorTests.java @@ -15,6 +15,7 @@ import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.TimeZone; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; @@ -24,7 +25,7 @@ public class CompositeKeyExtractorTests extends AbstractWireSerializingTestCase<CompositeKeyExtractor> { public static CompositeKeyExtractor randomCompositeKeyExtractor() { - return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomTimeZone()); + return new CompositeKeyExtractor(randomAlphaOfLength(16), randomFrom(asList(Property.values())), randomSafeTimeZone()); } @Override @@ -58,7 +59,7 @@ public void testExtractKey() { } public void testExtractDate() { - CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomTimeZone()); + CompositeKeyExtractor extractor = new CompositeKeyExtractor(randomAlphaOfLength(16), Property.VALUE, randomSafeTimeZone()); long millis = System.currentTimeMillis(); Bucket bucket = new TestBucket(singletonMap(extractor.key(), millis), randomLong(), new Aggregations(emptyList())); @@ -73,4 +74,13 @@ public void testExtractIncorrectDateKey() { SqlIllegalArgumentException exception = expectThrows(SqlIllegalArgumentException.class, () -> extractor.extract(bucket)); assertEquals("Invalid date key returned: " + value, exception.getMessage()); } + + /** + * We need to exclude SystemV/* time zones because they cannot be converted + * back to DateTimeZone which we currently still need to do internally, + * e.g. in bwc serialization and in the extract() method + */ + private static TimeZone randomSafeTimeZone() { + return randomValueOtherThanMany(tz -> tz.getID().startsWith("SystemV"), () -> randomTimeZone()); + } } \ No newline at end of file From 4cfca2fbd0eadaa831d9061a3144d878a5c440c5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 26 Apr 2018 11:51:48 -0700 Subject: [PATCH 16/24] Build: Fix deb version to use tilde with prerelease versions (#29000) This commit converts the deb package to use tildes in place of dashes in the internal package version. This is only relevant for prerelease versions of elasticsearch. Previously, this was not possible due to problems with the underlying library used by the ospackage plugin, but since a recent upgrade, it now works.
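As a rough illustration of the ordering this buys us (version strings are examples): in Debian package version comparison a tilde sorts before anything, including the end of the string, so 7.0.0~alpha1 correctly sorts before the final 7.0.0 package, whereas the previous dash-based 7.0.0-alpha1 form sorted after it.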
closes #21139 --- distribution/packages/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 6e5b69a66f7df..33f98386a8987 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -270,7 +270,7 @@ Closure commonDebConfig(boolean oss) { customFields['License'] = 'Elastic-License' } - version = project.version + version = project.version.replace('-', '~') packageGroup 'web' requires 'bash' requires 'libc6' From 80e0e64bfe970ddd15ec1d7a910fa6515fb01fe1 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 26 Apr 2018 22:18:39 +0200 Subject: [PATCH 17/24] Fix SliceBuilderTests#testRandom failures Add missing shard context creation in a random test. --- .../java/org/elasticsearch/search/slice/SliceBuilderTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index d6bcbfa8e6b7d..b93ebc1adde72 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -423,6 +423,7 @@ public void testToFilterRandom() throws IOException { for (int i = 0; i < numSlices; i++) { for (int j = 0; j < numShards; j++) { SliceBuilder slice = new SliceBuilder("_id", i, numSlices); + context = createShardContext(Version.CURRENT, reader, "_id", DocValuesType.SORTED, numShards, j); Query q = slice.toFilter(null, createRequest(j), context, Version.CURRENT); if (i == j) { assertThat(q, instanceOf(MatchAllDocsQuery.class)); From 55e1b1e8b592ca58aa499a0a17a271fb5ec6db6b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 26 Apr 2018 15:36:14 -0700 Subject: [PATCH 18/24] Tests: Use buildDir as base for generated-resources (#30191) This commit moves the generated-resources directories created by many qa projects into the build directory, so they are not seen as unknown files by git. --- x-pack/qa/full-cluster-restart/build.gradle | 4 ++-- x-pack/qa/rolling-upgrade-basic/build.gradle | 2 +- x-pack/qa/rolling-upgrade/build.gradle | 4 ++-- x-pack/qa/security-client-tests/build.gradle | 2 +- x-pack/qa/smoke-test-plugins-ssl/build.gradle | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index a51f764308072..8f5952d61edfb 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -114,7 +114,7 @@ subprojects { approvedLicenses << 'Apache' } - String outputDir = "generated-resources/${project.name}" + String outputDir = "${buildDir}/generated-resources/${project.name}" // This is a top level task which we will add dependencies to below. // It is a single task that can be used to backcompat tests against all versions.
@@ -123,7 +123,7 @@ subprojects { group = 'verification' } - String output = "generated-resources/${project.name}" + String output = "${buildDir}/generated-resources/${project.name}" task copyTestNodeKeystore(type: Copy) { from project(xpackModule('core')) .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 7fc2c44522bf9..bb1b5c58c4aa5 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -71,7 +71,7 @@ task bwcTest { group = 'verification' } -String outputDir = "generated-resources/${project.name}" +String outputDir = "${buildDir}/generated-resources/${project.name}" for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index bc49f33549a37..433dc08e1f39f 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -96,7 +96,7 @@ subprojects { } } - String outputDir = "generated-resources/${project.name}" + String outputDir = "${buildDir}/generated-resources/${project.name}" // This is a top level task which we will add dependencies to below. // It is a single task that can be used to backcompat tests against all versions. @@ -105,7 +105,7 @@ subprojects { group = 'verification' } - String output = "generated-resources/${project.name}" + String output = "${buildDir}/generated-resources/${project.name}" task copyTestNodeKeystore(type: Copy) { from project(xpackModule('core')) .file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') diff --git a/x-pack/qa/security-client-tests/build.gradle b/x-pack/qa/security-client-tests/build.gradle index 53d4ed464b5af..4e517f4d3633e 100644 --- a/x-pack/qa/security-client-tests/build.gradle +++ b/x-pack/qa/security-client-tests/build.gradle @@ -6,7 +6,7 @@ dependencies { testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime') } -String outputDir = "generated-resources/${project.name}" +String outputDir = "${buildDir}/generated-resources/${project.name}" task copyXPackPluginProps(type: Copy) { from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 09866421f21a9..bc7aa9fd39328 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -17,7 +17,7 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'runtime') } -String outputDir = "generated-resources/${project.name}" +String outputDir = "${buildDir}/generated-resources/${project.name}" task copyXPackPluginProps(type: Copy) { from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties From 804c38303f3c9b0094c0e49c88b2ec3175ef6e4f Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 27 Apr 2018 01:43:44 +0300 Subject: [PATCH 19/24] SQL: Add BinaryMathProcessor to named writeables list (#30127) BinaryMathProcessor was missing from the list of registered named writeables, causing deserialization errors. Fix #30014 --- .../function/scalar/Processors.java | 2 + .../scalar/math/BinaryMathProcessorTests.java | 59 +++++++++++++++++++ 2 files
changed, 61 insertions(+) create mode 100644 x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index d701d8fbd6144..2084ad684df67 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor; @@ -40,6 +41,7 @@ public static List<Entry> getNamedWriteables() { // arithmetic entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new)); entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new)); + entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new)); // datetime entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new)); // math diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java new file mode 100644 index 0000000000000..6563760d22512 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/BinaryMathProcessorTests.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.math; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor; + +import static org.elasticsearch.xpack.sql.tree.Location.EMPTY; + +public class BinaryMathProcessorTests extends AbstractWireSerializingTestCase<BinaryMathProcessor> { + public static BinaryMathProcessor randomProcessor() { + return new BinaryMathProcessor( + new ConstantProcessor(randomLong()), + new ConstantProcessor(randomLong()), + randomFrom(BinaryMathProcessor.BinaryMathOperation.values())); + } + + @Override + protected BinaryMathProcessor createTestInstance() { + return randomProcessor(); + } + + @Override + protected Reader<BinaryMathProcessor> instanceReader() { + return BinaryMathProcessor::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Processors.getNamedWriteables()); + } + + public void testAtan2() { + Processor ba = new ATan2(EMPTY, l(1), l(1)).makeProcessorDefinition().asProcessor(); + assertEquals(0.7853981633974483d, ba.process(null)); + } + + public void testPower() { + Processor ba = new Power(EMPTY, l(2), l(2)).makeProcessorDefinition().asProcessor(); + assertEquals(4d, ba.process(null)); + } + + public void testHandleNull() { + assertNull(new ATan2(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null)); + assertNull(new Power(EMPTY, l(null), l(null)).makeProcessorDefinition().asProcessor().process(null)); + } + + private static Literal l(Object value) { + return Literal.of(EMPTY, value); + } +} \ No newline at end of file From 0d8aed8c2b4963f529b6f93a88abf0ed9df4ffff Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Thu, 26 Apr 2018 16:09:27 -0700 Subject: [PATCH 20/24] Fix a bug in FieldCapabilitiesRequest#equals and hashCode. (#30181) Also update its unit test to AbstractStreamableTestCase for better coverage.
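To make the bug concrete, a minimal sketch of the behaviour being fixed (hypothetical test code; it assumes only the setMergeResults/isMergeResults accessors visible in the diff below):

    FieldCapabilitiesRequest a = new FieldCapabilitiesRequest();
    FieldCapabilitiesRequest b = new FieldCapabilitiesRequest();
    a.setMergeResults(true);
    b.setMergeResults(false);
    // Before this change equals() and hashCode() ignored mergeResults, so the two
    // requests compared equal despite behaving differently; afterwards they compare
    // unequal and hashCode() stays consistent with equals().
    assert a.equals(b) == false;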
--- .../fieldcaps/FieldCapabilitiesRequest.java | 16 ++-- .../FieldCapabilitiesRequestTests.java | 82 ++++++++----------- 2 files changed, 42 insertions(+), 56 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 264fa21cf9188..e91d9a703f491 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -162,17 +162,17 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o; - - if (!Arrays.equals(indices, that.indices)) return false; - if (!indicesOptions.equals(that.indicesOptions)) return false; - return Arrays.equals(fields, that.fields); + return Arrays.equals(indices, that.indices) && + Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(fields, that.fields) && + Objects.equals(mergeResults, that.mergeResults); } @Override public int hashCode() { - int result = Arrays.hashCode(indices); - result = 31 * result + indicesOptions.hashCode(); - result = 31 * result + Arrays.hashCode(fields); - return result; + return Objects.hash(Arrays.hashCode(indices), + indicesOptions, + Arrays.hashCode(fields), + mergeResults); } } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 9f893ada9c735..4e4bd7fdf5534 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -21,15 +21,18 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.test.AbstractStreamableTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; -public class FieldCapabilitiesRequestTests extends ESTestCase { - private FieldCapabilitiesRequest randomRequest() { +public class FieldCapabilitiesRequestTests extends AbstractStreamableTestCase<FieldCapabilitiesRequest> { + + @Override + protected FieldCapabilitiesRequest createTestInstance() { FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); int size = randomIntBetween(1, 20); String[] randomFields = new String[size]; @@ -50,50 +53,33 @@ private FieldCapabilitiesRequest randomRequest() { return request; } - public void testEqualsAndHashcode() { - FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); - request.indices("foo"); - request.indicesOptions(IndicesOptions.lenientExpandOpen()); - request.fields("bar"); - - FieldCapabilitiesRequest other = new FieldCapabilitiesRequest(); - other.indices("foo"); - other.indicesOptions(IndicesOptions.lenientExpandOpen()); - other.fields("bar"); - assertEquals(request, request); - assertEquals(request, other); - assertEquals(request.hashCode(), other.hashCode()); - - // change indices - other.indices("foo", "bar"); -
assertNotEquals(request, other); - other.indices("foo"); - assertEquals(request, other); - - // change fields - other.fields("foo", "bar"); - assertNotEquals(request, other); - other.fields("bar"); - assertEquals(request, request); - - // change indices options - other.indicesOptions(IndicesOptions.strictExpand()); - assertNotEquals(request, other); - + @Override + protected FieldCapabilitiesRequest createBlankInstance() { + return new FieldCapabilitiesRequest(); } - public void testSerialization() throws IOException { - for (int i = 0; i < 20; i++) { - FieldCapabilitiesRequest request = randomRequest(); - BytesStreamOutput output = new BytesStreamOutput(); - request.writeTo(output); - output.flush(); - StreamInput input = output.bytes().streamInput(); - FieldCapabilitiesRequest deserialized = new FieldCapabilitiesRequest(); - deserialized.readFrom(input); - assertEquals(deserialized, request); - assertEquals(deserialized.hashCode(), request.hashCode()); - } + @Override + protected FieldCapabilitiesRequest mutateInstance(FieldCapabilitiesRequest instance) throws IOException { + List<Consumer<FieldCapabilitiesRequest>> mutators = new ArrayList<>(); + mutators.add(request -> { + String[] fields = ArrayUtils.concat(request.fields(), new String[] {randomAlphaOfLength(10)}); + request.fields(fields); + }); + mutators.add(request -> { + String[] indices = ArrayUtils.concat(instance.indices(), generateRandomStringArray(5, 10, false, false)); + request.indices(indices); + }); + mutators.add(request -> { + IndicesOptions indicesOptions = randomValueOtherThan(request.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + request.indicesOptions(indicesOptions); + }); + mutators.add(request -> request.setMergeResults(!request.isMergeResults())); + + FieldCapabilitiesRequest mutatedInstance = copyInstance(instance); + Consumer<FieldCapabilitiesRequest> mutator = randomFrom(mutators); + mutator.accept(mutatedInstance); + return mutatedInstance; } public void testValidation() { From 592481e4ed32412ed1d65a879fe4b3f16c511f41 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 26 Apr 2018 21:42:44 -0400 Subject: [PATCH 21/24] Require acknowledgement to start_trial license (#30135) This is related to #30134. It modifies the start_trial action to require an acknowledgement parameter in the rest request to actually start the trial license. There are backwards compatibility issues as prior ES versions did not support this parameter. To handle this, it is assumed that a request coming from a node prior to 6.3 is acknowledged, and attempts to write a non-acknowledged request to a pre-6.3 node will throw an exception. Additionally, this PR adds messages about the trial license the user is generating.
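The resulting two-step flow looks roughly like this (a sketch in the style of the docs changed below, with response bodies abbreviated):

    POST /_xpack/license/start_trial
    => {"acknowledged":false,"trial_was_started":false,
        "error_message":"Operation failed: Needs acknowledgement.","acknowledge":{...}}

    POST /_xpack/license/start_trial?acknowledge=true
    => {"acknowledged":true,"trial_was_started":true,"type":"trial"}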
--- x-pack/docs/build.gradle | 1 - .../en/rest-api/license/start-trial.asciidoc | 25 ++++++- .../license/LicensingClient.java | 4 +- .../license/PostStartTrialRequest.java | 31 +++++++- .../license/PostStartTrialRequestBuilder.java | 5 ++ .../license/PostStartTrialResponse.java | 73 ++++++++++++++++++- .../license/RestGetTrialStatus.java | 2 +- .../license/RestPostStartTrialLicense.java | 30 ++++++-- .../license/StartTrialClusterTask.java | 22 +++++- .../license/StartTrialLicenseTests.java | 44 +++++++---- .../api/xpack.license.post_start_trial.json | 4 + .../test/license/20_put_license.yml | 4 +- 12 files changed, 206 insertions(+), 39 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index ae851732daac8..ab9bc99459968 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -61,7 +61,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/watcher/trigger/schedule/yearly.asciidoc', 'en/watcher/troubleshooting.asciidoc', 'en/rest-api/license/delete-license.asciidoc', - 'en/rest-api/license/start-trial.asciidoc', 'en/rest-api/license/update-license.asciidoc', 'en/ml/api-quickref.asciidoc', 'en/rest-api/ml/delete-calendar-event.asciidoc', diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/x-pack/docs/en/rest-api/license/start-trial.asciidoc index 8ff793455a239..7754f6feef79c 100644 --- a/x-pack/docs/en/rest-api/license/start-trial.asciidoc +++ b/x-pack/docs/en/rest-api/license/start-trial.asciidoc @@ -40,7 +40,7 @@ The following example checks whether you are eligible to start a trial: [source,js] ------------------------------------------------------------ -POST _xpack/license/start_trial +GET _xpack/license/start_trial ------------------------------------------------------------ // CONSOLE // TEST[skip:license testing issues] @@ -49,6 +49,27 @@ Example response: [source,js] ------------------------------------------------------------ { - "trial_was_started": true + "eligible_to_start_trial": true } ------------------------------------------------------------ +// NOTCONSOLE + +The following example starts a 30-day trial license. The acknowledge +parameter is required as you are initiating a license that will expire. 
+ +[source,js] +------------------------------------------------------------ +POST _xpack/license/start_trial?acknowledge=true +------------------------------------------------------------ +// CONSOLE +// TEST[skip:license testing issues] + +Example response: +[source,js] +------------------------------------------------------------ +{ + "trial_was_started": true, + "acknowledged": true +} +------------------------------------------------------------ +// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java index d2d4461b93108..21381b376925d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensingClient.java @@ -41,11 +41,11 @@ public void deleteLicense(DeleteLicenseRequest request, ActionListener { + private boolean acknowledge = false; private String type; @Override @@ -31,25 +32,47 @@ public String getType() { return type; } + public PostStartTrialRequest acknowledge(boolean acknowledge) { + this.acknowledge = acknowledge; + return this; + } + + public boolean isAcknowledged() { + return acknowledge; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); if (in.getVersion().onOrAfter(Version.V_6_3_0)) { type = in.readString(); + acknowledge = in.readBoolean(); } else { type = "trial"; + acknowledge = true; } } @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - Version version = Version.V_6_3_0; + // TODO: Change to 6.3 after backport + Version version = Version.V_7_0_0_alpha1; if (out.getVersion().onOrAfter(version)) { + super.writeTo(out); out.writeString(type); + out.writeBoolean(acknowledge); } else { - throw new IllegalArgumentException("All nodes in cluster must be version [" + version - + "] or newer to use `type` parameter. Attempting to write to node with version [" + out.getVersion() + "]."); + if ("trial".equals(type) == false) { + throw new IllegalArgumentException("All nodes in cluster must be version [" + version + + "] or newer to start trial with a different type than 'trial'. Attempting to write to " + + "a node with version [" + out.getVersion() + "] with trial type [" + type + "]."); + } else if (acknowledge == false) { + throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " + + "prior to [" + version + "]. 
Attempting to send request to node with version [" + out.getVersion() + "] " + + "without acknowledgement."); + } else { + super.writeTo(out); + } } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java index af381e13517f8..6b0beba171bdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialRequestBuilder.java @@ -14,4 +14,9 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder acknowledgeMessages; + private String acknowledgeMessage; PostStartTrialResponse() { } PostStartTrialResponse(Status status) { + this(status, Collections.emptyMap(), null); + } + + PostStartTrialResponse(Status status, Map<String, String[]> acknowledgeMessages, String acknowledgeMessage) { this.status = status; + this.acknowledgeMessages = acknowledgeMessages; + this.acknowledgeMessage = acknowledgeMessage; } public Status getStatus() { @@ -57,10 +76,58 @@ public Status getStatus() { @Override public void readFrom(StreamInput in) throws IOException { status = in.readEnum(Status.class); + // TODO: Change to 6.3 after backport + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + acknowledgeMessage = in.readOptionalString(); + int size = in.readVInt(); + Map<String, String[]> acknowledgeMessages = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String feature = in.readString(); + int nMessages = in.readVInt(); + String[] messages = new String[nMessages]; + for (int j = 0; j < nMessages; j++) { + messages[j] = in.readString(); + } + acknowledgeMessages.put(feature, messages); + } + this.acknowledgeMessages = acknowledgeMessages; + } else { + this.acknowledgeMessages = Collections.emptyMap(); + } } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(status); + // TODO: Change to 6.3 after backport + Version version = Version.V_7_0_0_alpha1; + if (out.getVersion().onOrAfter(version)) { + out.writeEnum(status); + out.writeOptionalString(acknowledgeMessage); + out.writeVInt(acknowledgeMessages.size()); + for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) { + out.writeString(entry.getKey()); + out.writeVInt(entry.getValue().length); + for (String message : entry.getValue()) { + out.writeString(message); + } + } + } else { + if (status == Status.UPGRADED_TO_TRIAL) { + out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL); + } else if (status == Status.TRIAL_ALREADY_ACTIVATED) { + out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED); + } else { + throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " + + "acknowledgement parameter."); + } + } + } + + Map<String, String[]> getAcknowledgementMessages() { + return acknowledgeMessages; + } + + String getAcknowledgementMessage() { + return acknowledgeMessage; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java index ebd43318ff91e..a136f2a88a65d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestGetTrialStatus.java @@ -29,7 +29,7 @@ public class RestGetTrialStatus extends XPackRestHandler { @Override protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
- return channel -> client.licensing().prepareGetUpgradeToTrial().execute( + return channel -> client.licensing().prepareGetStartTrial().execute( new RestBuilderListener<GetTrialStatusResponse>(channel) { @Override public RestResponse buildResponse(GetTrialStatusResponse response, XContentBuilder builder) throws Exception { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java index 0332eedd69dd1..af738b9aadf7f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.rest.XPackRestHandler; import java.io.IOException; +import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -30,23 +31,36 @@ public class RestPostStartTrialLicense extends XPackRestHandler { protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException { PostStartTrialRequest startTrialRequest = new PostStartTrialRequest(); startTrialRequest.setType(request.param("type", "trial")); + startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); return channel -> client.licensing().postStartTrial(startTrialRequest, new RestBuilderListener<PostStartTrialResponse>(channel) { @Override public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception { PostStartTrialResponse.Status status = response.getStatus(); + builder.startObject(); + builder.field("acknowledged", startTrialRequest.isAcknowledged()); if (status.isTrialStarted()) { - builder.startObject() - .field("trial_was_started", true) - .field("type", startTrialRequest.getType()) - .endObject(); + builder.field("trial_was_started", true); + builder.field("type", startTrialRequest.getType()); } else { - builder.startObject() - .field("trial_was_started", false) - .field("error_message", status.getErrorMessage()) - .endObject(); + builder.field("trial_was_started", false); + builder.field("error_message", status.getErrorMessage()); + } + Map<String, String[]> acknowledgementMessages = response.getAcknowledgementMessages(); + if (acknowledgementMessages.isEmpty() == false) { + builder.startObject("acknowledge"); + builder.field("message", response.getAcknowledgementMessage()); + for (Map.Entry<String, String[]> entry : acknowledgementMessages.entrySet()) { + builder.startArray(entry.getKey()); + for (String message : entry.getValue()) { + builder.value(message); + } + builder.endArray(); + } + builder.endObject(); } + builder.endObject(); return new BytesRestResponse(status.getRestStatus(), builder); } }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 3ca8dbf0eaa4e..355672dedf717 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -15,10 +15,23 @@ import org.elasticsearch.common.Nullable; import java.time.Clock; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; public class StartTrialClusterTask extends ClusterStateUpdateTask { + private static final String
ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " + + "By starting this trial, you agree that it is subject to the terms and conditions at" + + " https://www.elastic.co/legal/trial_license/. To begin your free trial, call /start_trial again and specify " + + "the \"acknowledge=true\" parameter."; + + private static final Map<String, String[]> ACK_MESSAGES = Collections.singletonMap("security", + new String[] {"With a trial license, X-Pack security features are available, but are not enabled by default."}); + private final Logger logger; private final String clusterName; private final PostStartTrialRequest request; @@ -39,7 +52,10 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE); logger.debug("started self generated trial license: {}", oldLicensesMetaData); - if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) { + if (request.isAcknowledged() == false) { + listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.NEED_ACKNOWLEDGEMENT, + ACK_MESSAGES, ACKNOWLEDGEMENT_HEADER)); + } else if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) { listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.UPGRADED_TO_TRIAL)); } else { listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED)); @@ -50,7 +66,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS public ClusterState execute(ClusterState currentState) throws Exception { LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE); - if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) { + if (request.isAcknowledged() == false) { + return currentState; + } else if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) { long issueDate = clock.millis(); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); long expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java index d673c4e720452..b7a09d24b1359 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/StartTrialLicenseTests.java @@ -56,33 +56,47 @@ public void testStartTrial() throws Exception { assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals("{\"eligible_to_start_trial\":true}", body); - String type = randomFrom(LicenseService.VALID_TRIAL_TYPES); - - Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + type); + // Test that starting will fail without acknowledgement + Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial"); String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8)); assertEquals(200, response2.getStatusLine().getStatusCode()); - assertTrue(body2.contains("\"trial_was_started\":true")); - assertTrue(body2.contains("\"type\":\"" + type + "\"")); + assertTrue(body2.contains("\"trial_was_started\":false")); + assertTrue(body2.contains("\"error_message\":\"Operation
failed: Needs acknowledgement.\"")); + assertTrue(body2.contains("\"acknowledged\":false")); assertBusy(() -> { - GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get(); - assertEquals(type, postTrialLicenseResponse.license().type()); + GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals("basic", getLicenseResponse.license().type()); }); - Response response3 = restClient.performRequest("GET", "/_xpack/license/trial_status"); + String type = randomFrom(LicenseService.VALID_TRIAL_TYPES); + + Response response3 = restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + type); String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8)); assertEquals(200, response3.getStatusLine().getStatusCode()); - assertEquals("{\"eligible_to_start_trial\":false}", body3); + assertTrue(body3.contains("\"trial_was_started\":true")); + assertTrue(body3.contains("\"type\":\"" + type + "\"")); + assertTrue(body3.contains("\"acknowledged\":true")); + + assertBusy(() -> { + GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get(); + assertEquals(type, postTrialLicenseResponse.license().type()); + }); + + Response response4 = restClient.performRequest("GET", "/_xpack/license/trial_status"); + String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(200, response4.getStatusLine().getStatusCode()); + assertEquals("{\"eligible_to_start_trial\":false}", body4); String secondAttemptType = randomFrom(LicenseService.VALID_TRIAL_TYPES); ResponseException ex = expectThrows(ResponseException.class, - () -> restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + secondAttemptType)); - Response response4 = ex.getResponse(); - String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8)); - assertEquals(403, response4.getStatusLine().getStatusCode()); - assertTrue(body4.contains("\"trial_was_started\":false")); - assertTrue(body4.contains("\"error_message\":\"Operation failed: Trial was already activated.\"")); + () -> restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + secondAttemptType)); + Response response5 = ex.getResponse(); + String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8)); + assertEquals(403, response5.getStatusLine().getStatusCode()); + assertTrue(body5.contains("\"trial_was_started\":false")); + assertTrue(body5.contains("\"error_message\":\"Operation failed: Trial was already activated.\"")); } public void testInvalidType() throws Exception { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json index 688afc7b79bbf..a1e5d27da1eda 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/xpack.license.post_start_trial.json @@ -11,6 +11,10 @@ "type": { "type" : "string", "description" : "The type of trial license to generate (default: \"trial\")" + }, + "acknowledge": { + "type" : "boolean", + "description" : "whether the user has acknowledged acknowledge messages (default: false)" } } }, diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml index 98e96318d7a19..9eb3b79fda7a7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/license/20_put_license.yml @@ -133,7 +133,8 @@ teardown: - do: catch: forbidden - xpack.license.post_start_trial: {} + xpack.license.post_start_trial: + acknowledge: true - match: { trial_was_started: false } - match: { error_message: "Operation failed: Trial was already activated." } @@ -143,6 +144,7 @@ teardown: catch: bad_request xpack.license.post_start_trial: type: "basic" + acknowledge: true --- "Can start basic license if do not already have basic": - do: From e0b8893645d88296bccb5d6bf7604755d6998cee Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Fri, 27 Apr 2018 09:24:25 +0300 Subject: [PATCH 22/24] SQL: Correct error message (#30138) * SQL: Correct error message Error messages had placeholders that were not replaced; this PR fixes that Fix #30016 --- .../xpack/sql/planner/QueryTranslator.java | 20 +++---- .../analyzer/FieldAttributeTests.java | 2 +- .../logical/command/sys/SysColumnsTests.java | 6 +- .../sql/planner/QueryTranslatorTests.java | 55 +++++++++++++++++++ .../mapping-multi-field-variation.json | 1 + 5 files changed, 70 insertions(+), 14 deletions(-) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 2c90921e28599..dd0456e9aefc8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.expression.BinaryExpression; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.ExpressionId; +import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.expression.NamedExpression; @@ -159,7 +160,7 @@ static QueryTranslation toQuery(Expression e, boolean onAggs) { } } - throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", e.nodeName(), e)); + throw new SqlIllegalArgumentException("Don't know how to translate {} {}", e.nodeName(), e); } static LeafAgg toAgg(String id, Function f) { @@ -171,7 +172,7 @@ static LeafAgg toAgg(String id, Function f) { } } - throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", f.nodeName(), f)); + throw new SqlIllegalArgumentException("Don't know how to translate {} {}", f.nodeName(), f); } static class GroupingContext { @@ -395,8 +396,8 @@ static String field(AggregateFunction af) { if (arg instanceof Literal) { return String.valueOf(((Literal) arg).value()); } - throw new SqlIllegalArgumentException("Does not know how to convert argument " + arg.nodeString() - + " for function " + af.nodeString()); + throw new SqlIllegalArgumentException("Does not know how to convert argument {} for function {}", arg.nodeString(), + af.nodeString()); } // TODO: need to optimize on ngram @@ -505,9 +506,9 @@ static class BinaryComparisons extends ExpressionTranslator { @Override protected QueryTranslation 
asQuery(BinaryComparison bc, boolean onAggs) { Check.isTrue(bc.right().foldable(), - "Line %d:%d - Comparisons against variables are not (currently) supported; offender %s in %s", + "Line {}:{}: Comparisons against variables are not (currently) supported; offender [{}] in [{}]", bc.right().location().getLineNumber(), bc.right().location().getColumnNumber(), - bc.right().nodeName(), bc.nodeName()); + Expressions.name(bc.right()), bc.symbol()); if (bc.left() instanceof NamedExpression) { NamedExpression ne = (NamedExpression) bc.left(); @@ -605,8 +606,8 @@ private static Query translateQuery(BinaryComparison bc) { return new TermQuery(loc, name, value); } - Check.isTrue(false, "don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), bc); - return null; + throw new SqlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), + bc); } } @@ -700,9 +701,8 @@ else if (onAggs) { return new QueryTranslation(query, aggFilter); } else { - throw new UnsupportedOperationException("No idea how to translate " + e); + throw new SqlIllegalArgumentException("No idea how to translate " + e); } - } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java index 06b96552e5e98..9d05d151359fd 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/FieldAttributeTests.java @@ -153,7 +153,7 @@ public void testDottedFieldPathTypo() { public void testStarExpansionExcludesObjectAndUnsupportedTypes() { LogicalPlan plan = plan("SELECT * FROM test"); List<? extends NamedExpression> list = ((Project) plan).projections(); - assertThat(list, hasSize(7)); + assertThat(list, hasSize(8)); List<String> names = Expressions.names(list); assertThat(names, not(hasItem("some"))); assertThat(names, not(hasItem("some.dotted"))); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index 2866624b07bb9..bddddc6941cbb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -17,7 +17,7 @@ public class SysColumnsTests extends ESTestCase { public void testSysColumns() { List<List<?>> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null); - assertEquals(15, rows.size()); + assertEquals(16, rows.size()); assertEquals(24, rows.get(0).size()); List<?> row = rows.get(0); @@ -38,13 +38,13 @@ public void testSysColumns() { assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); - row = rows.get(6); + row = rows.get(7); assertEquals("some.dotted", name(row)); assertEquals(Types.STRUCT, sqlType(row)); assertEquals(null, radix(row)); assertEquals(-1, bufferLength(row)); - row = rows.get(14); + row = rows.get(15); assertEquals("some.ambiguous.normalized", name(row)); assertEquals(Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); diff --git
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index f654d0c701178..2a3d87b65c964 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.planner; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer; import org.elasticsearch.xpack.sql.analysis.index.EsIndex; import org.elasticsearch.xpack.sql.analysis.index.IndexResolution; @@ -18,9 +19,11 @@ import org.elasticsearch.xpack.sql.plan.logical.Project; import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation; import org.elasticsearch.xpack.sql.querydsl.query.Query; +import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery; import org.elasticsearch.xpack.sql.querydsl.query.TermQuery; import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; +import org.joda.time.DateTime; import java.util.Map; import java.util.TimeZone; @@ -84,4 +87,56 @@ public void testTermEqualityNotAnalyzed() { assertEquals("int", tq.term()); assertEquals(5, tq.value()); } + + public void testComparisonAgainstColumns() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > int"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false)); + assertEquals("Line 1:43: Comparisons against variables are not (currently) supported; offender [int] in [>]", ex.getMessage()); + } + + public void testDateRange() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > 1969-05-13"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + assertEquals(1951, rq.lower()); + } + + public void testDateRangeLiteral() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > '1969-05-13'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + assertEquals("date", rq.field()); + assertEquals("1969-05-13", rq.lower()); + } + + public void testDateRangeCast() { + LogicalPlan p = plan("SELECT some.string FROM test WHERE date > CAST('1969-05-13T12:34:56Z' AS DATE)"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + Expression condition = ((Filter) p).condition(); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + Query query = translation.query; + assertTrue(query instanceof RangeQuery); + RangeQuery rq = (RangeQuery) query; + 
assertEquals("date", rq.field()); + assertEquals(DateTime.parse("1969-05-13T12:34:56Z"), rq.lower()); + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json index 4e6ff625a5cc1..13c9f62b2136e 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-multi-field-variation.json @@ -4,6 +4,7 @@ "int" : { "type" : "integer" }, "text" : { "type" : "text" }, "keyword" : { "type" : "keyword" }, + "date" : { "type" : "date" }, "unsupported" : { "type" : "ip_range" }, "some" : { "properties" : { From e1a16a601840f271ee2092fd4ea0ec111a6e97b3 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 27 Apr 2018 08:41:36 +0200 Subject: [PATCH 23/24] REST: Remove GET support for clear cache indices (#29525) Clearing the cache indices can be done via GET and POST. As GET should only support read only operations, this removes the support for using GET for clearing the indices caches. --- docs/reference/release-notes/7.0.0-alpha1.asciidoc | 5 ++++- .../resources/rest-api-spec/api/indices.clear_cache.json | 2 +- .../action/admin/indices/RestClearIndicesCacheAction.java | 4 ---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/reference/release-notes/7.0.0-alpha1.asciidoc b/docs/reference/release-notes/7.0.0-alpha1.asciidoc index 128a9b7dd716b..618d9e70dcb30 100644 --- a/docs/reference/release-notes/7.0.0-alpha1.asciidoc +++ b/docs/reference/release-notes/7.0.0-alpha1.asciidoc @@ -8,4 +8,7 @@ The changes listed below have been released for the first time in Elasticsearch === Breaking changes Core:: -* Tribe node has been removed in favor of Cross-Cluster-Search \ No newline at end of file +* Tribe node has been removed in favor of Cross-Cluster-Search + +Rest API:: +* The Clear Cache API only supports `POST` as HTTP method diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 9a0f7be1d65ae..7e62371dd674d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -1,7 +1,7 @@ { "indices.clear_cache": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", - "methods": ["POST", "GET"], + "methods": ["POST"], "url": { "path": "/_cache/clear", "paths": ["/_cache/clear", "/{index}/_cache/clear"], diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index 266c1cb68f03f..38b9d987d04e5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -31,7 +31,6 @@ import java.io.IOException; -import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestClearIndicesCacheAction extends BaseRestHandler { @@ -40,9 +39,6 @@ public RestClearIndicesCacheAction(Settings settings, RestController controller) super(settings); controller.registerHandler(POST, "/_cache/clear", this); controller.registerHandler(POST, 
"/{index}/_cache/clear", this); - - controller.registerHandler(GET, "/_cache/clear", this); - controller.registerHandler(GET, "/{index}/_cache/clear", this); } @Override From 707ba28d487e9ef2572744a2bf5342e67ffa826e Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Fri, 27 Apr 2018 08:55:25 +0200 Subject: [PATCH 24/24] Watcher: Ensure mail message ids are unique per watch action (#30112) Email message IDs are supposed to be unique. In order to guarantee this, we need to take the action id of a watch action into account as well, not just the watch id from the watch execution context. This prevents that two actions from the same watch execution end up with the same message id. --- .../actions/email/ExecutableEmailAction.java | 2 +- .../watcher/notification/email/Email.java | 2 +- .../actions/email/EmailActionTests.java | 2 +- .../actions/email/EmailMessageIdTests.java | 88 +++++++++++++++++++ 4 files changed, 91 insertions(+), 3 deletions(-) create mode 100644 x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java index 8e0955c4eaf40..f737d89c1286d 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/email/ExecutableEmailAction.java @@ -63,7 +63,7 @@ public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload } Email.Builder email = action.getEmail().render(templateEngine, model, htmlSanitizer, attachments); - email.id(ctx.id().value()); + email.id(actionId + "_" + ctx.id().value()); if (ctx.simulateAction(actionId)) { return new EmailAction.Result.Simulated(email.build()); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java index fcf3030233f35..88800f8709aa6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Email.java @@ -354,7 +354,7 @@ public Builder attach(Attachment attachment) { * after this is called is incorrect. 
          */
         public Email build() {
-            assert id != null : "email id should not be null";
+            assert id != null : "email id should not be null";
             Email email = new Email(id, from, replyTo, priority, sentDate, to, cc, bcc, subject, textBody, htmlBody,
                     unmodifiableMap(attachments));
             attachments = null;
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java
index 1645f61a734d8..83b48cb9f4f0a 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailActionTests.java
@@ -171,7 +171,7 @@ public void testExecute() throws Exception {
         assertThat(result, instanceOf(EmailAction.Result.Success.class));
         assertThat(((EmailAction.Result.Success) result).account(), equalTo(account));
         Email actualEmail = ((EmailAction.Result.Success) result).email();
-        assertThat(actualEmail.id(), is(wid.value()));
+        assertThat(actualEmail.id(), is("_id_" + wid.value()));
         assertThat(actualEmail, notNullValue());
         assertThat(actualEmail.subject(), is(subject == null ? null : subject.getTemplate()));
         assertThat(actualEmail.textBody(), is(textBody == null ? null : textBody.getTemplate()));
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java
new file mode 100644
index 0000000000000..9051f50e62b85
--- /dev/null
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.watcher.actions.email;
+
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext;
+import org.elasticsearch.xpack.core.watcher.watch.Payload;
+import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine;
+import org.elasticsearch.xpack.watcher.notification.email.EmailService;
+import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate;
+import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer;
+import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer;
+import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine;
+import org.elasticsearch.xpack.watcher.test.WatcherTestUtils;
+import org.junit.After;
+import org.junit.Before;
+
+import javax.mail.internet.MimeMessage;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.hasSize;
+
+public class EmailMessageIdTests extends ESTestCase {
+
+    private EmailServer server;
+    private TextTemplateEngine textTemplateEngine = new MockTextTemplateEngine();
+    private HtmlSanitizer htmlSanitizer = new HtmlSanitizer(Settings.EMPTY);
+    private EmailService emailService;
+    private EmailAction emailAction;
+
+    @Before
+    public void startSmtpServer() {
+        server = EmailServer.localhost(logger);
+
+        Settings settings = Settings.builder()
+                .put("xpack.notification.email.account.test.smtp.auth", true)
+                .put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME)
+                .put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD)
+                .put("xpack.notification.email.account.test.smtp.port", server.port())
+                .put("xpack.notification.email.account.test.smtp.host", "localhost")
+                .build();
+
+        Set<Setting<?>> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        registeredSettings.addAll(EmailService.getSettings());
+        ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings);
+        emailService = new EmailService(settings, null, clusterSettings);
+        EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org")
+                .subject("subject").textBody("body").build();
+        emailAction = new EmailAction(emailTemplate, null, null, null, null, null);
+    }
+
+    @After
+    public void stopSmtpServer() {
+        server.stop();
+    }
+
+    public void testThatMessageIdIsUnique() throws Exception {
+        List<MimeMessage> messages = new ArrayList<>();
+        server.addListener(messages::add);
+        ExecutableEmailAction firstEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
+                htmlSanitizer, Collections.emptyMap());
+        ExecutableEmailAction secondEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
+                htmlSanitizer, Collections.emptyMap());
+
+        WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger);
+        firstEmailAction.execute("my_first_action_id", ctx, Payload.EMPTY);
+        secondEmailAction.execute("my_second_action_id", ctx, Payload.EMPTY);
+
+        assertThat(messages, hasSize(2));
+        // check for unique message ids, should be two as well
+        Set<String> messageIds = new HashSet<>();
+        for (MimeMessage message : messages) {
+            messageIds.add(message.getMessageID());
+        }
+
+        assertThat(messageIds, hasSize(2));
+    }
+}
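
Note on the message-id fix in PATCH 24/24: every action of one watch execution shares the same WatchExecutionContext, so ctx.id().value() alone cannot distinguish two actions of the same run; prefixing the action id is what restores uniqueness. Below is a minimal standalone sketch of that reasoning, not taken from the patch itself; the execution-id string is a made-up placeholder for whatever ctx.id().value() returns.

    public class MessageIdSketch {
        public static void main(String[] args) {
            // Hypothetical watch execution id, shared by every action of one execution.
            String executionId = "my_watch_2018-04-27T08:55:25.000Z";

            // Before the fix the message id was the execution id alone,
            // so two actions of the same execution collided.
            String before1 = executionId;
            String before2 = executionId;
            System.out.println(before1.equals(before2)); // true -> duplicate message ids

            // After the fix the action id is prefixed, mirroring
            // email.id(actionId + "_" + ctx.id().value()) in ExecutableEmailAction.
            String after1 = "my_first_action_id" + "_" + executionId;
            String after2 = "my_second_action_id" + "_" + executionId;
            System.out.println(after1.equals(after2)); // false -> unique message ids
        }
    }

The same scheme also appears to explain the updated expectation in EmailActionTests: with an action id of "_id", the email id gains the asserted "_id_" prefix.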