From 4a054668249c5431b7a2d55b9a7e5c133fb47dde Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Thu, 31 May 2018 17:47:48 -0400
Subject: [PATCH 01/18] Watcher: Give test a little more time

Changes watcher's integration tests to wait 30 seconds when starting
watcher rather than 10 seconds because this build failed when starting
took 12 seconds:
https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+6.3+periodic/222/console
---
 .../watcher/test/AbstractWatcherIntegrationTestCase.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
index 26b02a195f756..4eb4bd1aa2c6e 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java
@@ -73,6 +73,7 @@ import java.util.Map;
 import java.util.Locale;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
@@ -483,8 +484,7 @@ protected void startWatcher() throws Exception {
             }
             throw new AssertionError("unexpected state, retrying with next run");
-        });
-
+        }, 30, TimeUnit.SECONDS);
     }

     protected void ensureLicenseEnabled() throws Exception {

From 443e47da90292159d59ed5d3919120485f95c5c3 Mon Sep 17 00:00:00 2001
From: Lisa Cawley
Date: Thu, 31 May 2018 17:02:18 -0700
Subject: [PATCH 02/18] [DOCS] Fixes links (#31011)

---
 x-pack/docs/en/ml/architecture.asciidoc          | 3 ++-
 x-pack/docs/en/ml/configuring.asciidoc           | 4 ++--
 x-pack/docs/en/ml/getting-started-next.asciidoc  | 2 +-
 x-pack/docs/en/ml/getting-started.asciidoc       | 7 ++++---
 x-pack/docs/en/ml/troubleshooting.asciidoc       | 2 +-
 x-pack/docs/en/security/troubleshooting.asciidoc | 2 +-
 6 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/x-pack/docs/en/ml/architecture.asciidoc b/x-pack/docs/en/ml/architecture.asciidoc
index d16a8301da107..6fc3e36964ff7 100644
--- a/x-pack/docs/en/ml/architecture.asciidoc
+++ b/x-pack/docs/en/ml/architecture.asciidoc
@@ -6,4 +6,5 @@ A {ml} node is a node that has `xpack.ml.enabled` and `node.ml` set to `true`,
 which is the default behavior. If you set `node.ml` to `false`, the node can
 service API requests but it cannot run jobs. If you want to use {xpackml}
 features, there must be at least one {ml} node in your cluster. For more
-information about this setting, see <>.
+information about this setting, see
+{ref}/ml-settings.html[{ml} settings in {es}].

diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/x-pack/docs/en/ml/configuring.asciidoc
index b794d3ebd3330..ba965a08b0462 100644
--- a/x-pack/docs/en/ml/configuring.asciidoc
+++ b/x-pack/docs/en/ml/configuring.asciidoc
@@ -3,8 +3,8 @@
 If you want to use {xpackml} features, there must be at least one {ml} node in
 your cluster and all master-eligible nodes must have {ml} enabled. By default,
-all nodes are {ml} nodes. For more information about these settings, see
-<>.
+all nodes are {ml} nodes. For more information about these settings, see
+{ref}/modules-node.html#modules-node-xpack[{ml} nodes].

 To use the {xpackml} features to analyze your data, you must create a job and
 send your data to that job.
diff --git a/x-pack/docs/en/ml/getting-started-next.asciidoc b/x-pack/docs/en/ml/getting-started-next.asciidoc
index 8717474759236..90d1e7798ee9c 100644
--- a/x-pack/docs/en/ml/getting-started-next.asciidoc
+++ b/x-pack/docs/en/ml/getting-started-next.asciidoc
@@ -51,5 +51,5 @@ multi-metric jobs and split the data or create more complex analysis functions
 as necessary. For examples of more complicated configuration options, see
 <>.

-If you encounter problems, we're here to help. See <> and
+If you encounter problems, we're here to help. See <> and
 <>.

diff --git a/x-pack/docs/en/ml/getting-started.asciidoc b/x-pack/docs/en/ml/getting-started.asciidoc
index 2fd4f1ebe4972..0f1b7164d4adf 100644
--- a/x-pack/docs/en/ml/getting-started.asciidoc
+++ b/x-pack/docs/en/ml/getting-started.asciidoc
@@ -1,7 +1,7 @@
 [[ml-getting-started]]
-== Getting Started with Machine Learning
+== Getting started with machine learning
 ++++
-Getting Started
+Getting started
 ++++

 Ready to get some hands-on experience with the {xpackml} features? This
@@ -50,7 +50,8 @@ you can try all of the {xpack} features with a <>.
+activity related to jobs, see
+{ref}/modules-node.html#modules-node-xpack[{ml} node settings].

 [float]
 [[ml-gs-users]]

diff --git a/x-pack/docs/en/ml/troubleshooting.asciidoc b/x-pack/docs/en/ml/troubleshooting.asciidoc
index 3412845e98077..d5244cebdae70 100644
--- a/x-pack/docs/en/ml/troubleshooting.asciidoc
+++ b/x-pack/docs/en/ml/troubleshooting.asciidoc
@@ -10,7 +10,7 @@ answers for frequently asked questions.

 * <>
 * <>

-To get help, see <>.
+To get help, see <>.

 [[ml-rollingupgrade]]
 === Machine learning features unavailable after rolling upgrade

diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc
index c41ee11253f17..0bc043abd35cc 100644
--- a/x-pack/docs/en/security/troubleshooting.asciidoc
+++ b/x-pack/docs/en/security/troubleshooting.asciidoc
@@ -20,7 +20,7 @@ answers for frequently asked questions.

 * <>

-To get help, see <>.
+To get help, see <>.

 [[security-auth-failure-upgrade]]
 === Can't log in after upgrading to {version}

From 61316f01107753e8b05d8d741fab07ffbfbd06f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Fri, 1 Jun 2018 09:45:04 +0200
Subject: [PATCH 03/18] [Tests] Fix alias names in PutIndexTemplateRequestTests
 (#30960)

The randomized alias names could contain unicode control characters that
don't survive an xContent rendering and parsing roundtrip when using the
YAML xContent type. This fix filters the randomized unicode string for
control characters to avoid this particular problem.
Closes #30911
---
 .../action/admin/indices/alias/Alias.java           | 10 ++++++++--
 .../template/put/PutIndexTemplateRequestTests.java  |  4 +++-
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
index bd3c77cdb264a..9172500a8cb50 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java
@@ -28,7 +28,8 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.ToXContent.Params;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -42,7 +43,7 @@
 /**
  * Represents an alias, to be associated with an index
  */
-public class Alias implements Streamable, ToXContentObject {
+public class Alias implements Streamable, ToXContentFragment {

     private static final ParseField FILTER = new ParseField("filter");
     private static final ParseField ROUTING = new ParseField("routing");
@@ -248,6 +249,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }

+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;

diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
index 577a8b55e61a3..c21e6b3c225f0 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java
@@ -150,7 +150,9 @@ protected PutIndexTemplateRequest createTestInstance() {
         request.patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false)));
         int numAlias = between(0, 5);
         for (int i = 0; i < numAlias; i++) {
-            Alias alias = new Alias(randomRealisticUnicodeOfLengthBetween(1, 10));
+            // some ASCII or Latin-1 control characters, especially newline, can lead to
+            // problems with yaml parsers, that's why we filter them here (see #30911)
+            Alias alias = new Alias(randomRealisticUnicodeOfLengthBetween(1, 10).replaceAll("\\p{Cc}", ""));
             if (randomBoolean()) {
                 alias.indexRouting(randomRealisticUnicodeOfLengthBetween(1, 10));
             }

From db8aee8c66f1a543196d5a20c558386108920648 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Fri, 1 Jun 2018 10:02:57 +0200
Subject: [PATCH 04/18] Fix interoperability with < 6.3 transport clients
 (#30971)

With the default distribution changing in 6.3, clusters might now contain
custom metadata that a pure OSS transport client cannot deserialize. As
this can break transport clients when accessing the cluster state or
reroute APIs, we've decided to exclude any custom metadata that the
transport client might not be able to deserialize. This will ensure
compatibility between a < 6.3 transport client and a 6.3 default
distribution cluster.
Note that this PR only covers interoperability with older clients; another
follow-up PR will cover full interoperability for >= 6.3 transport clients,
where we will make it possible again to get the custom metadata from the
cluster state.

Relates to #30731
---
 .../reroute/ClusterRerouteResponse.java       |  8 ++++-
 .../cluster/state/ClusterStateResponse.java   |  7 ++++-
 .../elasticsearch/cluster/ClusterModule.java  | 31 +++++++++++++++++++
 .../cluster/ClusterModuleTests.java           | 27 ++++++++++++++++
 4 files changed, 71 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java
index f3233f4147ea8..caf9fa62d4cd1 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java
@@ -19,7 +19,9 @@
 package org.elasticsearch.action.admin.cluster.reroute;

+import org.elasticsearch.Version;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -70,7 +72,11 @@ public void readFrom(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        state.writeTo(out);
+        if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
+            state.writeTo(out);
+        } else {
+            ClusterModule.filterCustomsForPre63Clients(state).writeTo(out);
+        }
         writeAcknowledged(out);
         RoutingExplanations.writeTo(explanations, out);
     }

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
index cdc869e529d3b..71ad6f9f83476 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -94,7 +95,11 @@ public void readFrom(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         clusterName.writeTo(out);
-        clusterState.writeTo(out);
+        if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
+            clusterState.writeTo(out);
+        } else {
+            ClusterModule.filterCustomsForPre63Clients(clusterState).writeTo(out);
+        }
         if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             totalCompressedSize.writeTo(out);
         }

diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java
index 7f16c3f85ffc6..9c5c642df6b91 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -66,6 +66,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.ingest.IngestMetadata;
@@ -84,6 +85,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 import java.util.function.Function;
 import java.util.function.Supplier;

@@ -150,6 +152,35 @@ public static List<Entry> getNamedWriteables() {
         return entries;
     }

+    static final Set<String> PRE_6_3_METADATA_CUSTOMS_WHITE_LIST = Collections.unmodifiableSet(Sets.newHashSet(
+        IndexGraveyard.TYPE, IngestMetadata.TYPE, RepositoriesMetaData.TYPE, ScriptMetaData.TYPE));
+
+    static final Set<String> PRE_6_3_CLUSTER_CUSTOMS_WHITE_LIST = Collections.unmodifiableSet(Sets.newHashSet(
+        RestoreInProgress.TYPE, SnapshotDeletionsInProgress.TYPE, SnapshotsInProgress.TYPE));
+
+    /**
+     * For interoperability with transport clients older than 6.3, we need to strip customs
+     * from the cluster state that the client might not be able to deserialize
+     *
+     * @param clusterState the cluster state to filter the customs from
+     * @return the adapted cluster state
+     */
+    public static ClusterState filterCustomsForPre63Clients(ClusterState clusterState) {
+        final ClusterState.Builder builder = ClusterState.builder(clusterState);
+        clusterState.customs().keysIt().forEachRemaining(name -> {
+            if (PRE_6_3_CLUSTER_CUSTOMS_WHITE_LIST.contains(name) == false) {
+                builder.removeCustom(name);
+            }
+        });
+        final MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData());
+        clusterState.metaData().customs().keysIt().forEachRemaining(name -> {
+            if (PRE_6_3_METADATA_CUSTOMS_WHITE_LIST.contains(name) == false) {
+                metaBuilder.removeCustom(name);
+            }
+        });
+        return builder.metaData(metaBuilder).build();
+    }
+
     public static List<NamedXContentRegistry.Entry> getNamedXWriteables() {
         List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
         // Metadata

diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
index 176616690f0aa..efd8026645249 100644
--- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java
@@ -19,6 +19,8 @@
 package org.elasticsearch.cluster;

+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
@@ -251,4 +253,29 @@ public Map<String, Supplier<ClusterState.Custom>> getInitialClusterStateCustomSu
             assertEquals(ise.getMessage(), "custom supplier key [foo] is registered more than once");
         }
     }
+
+    public void testPre63CustomsFiltering() {
+        final String whiteListedClusterCustom = randomFrom(ClusterModule.PRE_6_3_CLUSTER_CUSTOMS_WHITE_LIST);
+        final String whiteListedMetaDataCustom = randomFrom(ClusterModule.PRE_6_3_METADATA_CUSTOMS_WHITE_LIST);
+        final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .putCustom(whiteListedClusterCustom, new RestoreInProgress())
+            .putCustom("other", new RestoreInProgress())
+            .metaData(MetaData.builder()
+                .putCustom(whiteListedMetaDataCustom, new RepositoriesMetaData(Collections.emptyList()))
+                .putCustom("other", new RepositoriesMetaData(Collections.emptyList()))
+                .build())
+            .build();
+
+        assertNotNull(clusterState.custom(whiteListedClusterCustom));
+        assertNotNull(clusterState.custom("other"));
+        assertNotNull(clusterState.metaData().custom(whiteListedMetaDataCustom));
+        assertNotNull(clusterState.metaData().custom("other"));
+
+        final ClusterState fixedClusterState = ClusterModule.filterCustomsForPre63Clients(clusterState);
+
+        assertNotNull(fixedClusterState.custom(whiteListedClusterCustom));
+        assertNull(fixedClusterState.custom("other"));
+        assertNotNull(fixedClusterState.metaData().custom(whiteListedMetaDataCustom));
+        assertNull(fixedClusterState.metaData().custom("other"));
+    }
 }

From 20c8060d411f91bea01381edda483d82904b8d10 Mon Sep 17 00:00:00 2001
From: Yannick Welsch
Date: Fri, 1 Jun 2018 10:47:53 +0200
Subject: [PATCH 05/18] Allow rollup job creation only if cluster is x-pack
 ready (#30963)

Otherwise we could end up with persistent tasks metadata in the cluster
that some of the nodes might not understand, in cases where the cluster is
in the middle of a rolling upgrade from the default 6.2 to the default 6.3
distribution.

Follow-up to #30743
---
 .../xpack/rollup/action/TransportPutRollupJobAction.java | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
index 0e674ba000bd0..24dcb323e3dc6 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
@@ -42,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.core.XPackField;
+import org.elasticsearch.xpack.core.XPackPlugin;
 import org.elasticsearch.xpack.core.rollup.RollupField;
 import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction;
 import org.elasticsearch.xpack.core.rollup.job.RollupJob;
@@ -91,6 +92,8 @@ protected void masterOperation(PutRollupJobAction.Request request, ClusterState
             return;
         }

+        XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
+
         FieldCapabilitiesRequest fieldCapsRequest = new FieldCapabilitiesRequest()
             .indices(request.getConfig().getIndexPattern())
             .fields(request.getConfig().getAllFields().toArray(new String[0]));

From b9913657214548e22664ff77a18b6fbc7f3a239a Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Fri, 1 Jun 2018 09:47:03 +0200
Subject: [PATCH 06/18] Add an option to split keyword field on whitespace at
 query time (#30691)

This change adds an option named `split_queries_on_whitespace` to the
`keyword` field type. When set to true, full text queries (`match`,
`multi_match`, `query_string`, ...) that target the field will split the
input on whitespace to build the query terms. Defaults to `false`.
Closes #30393
---
 docs/reference/mapping/types/keyword.asciidoc  |  6 ++
 .../metadata/MetaDataIndexUpgradeService.java  |  2 +-
 .../index/analysis/AnalysisRegistry.java       | 22 +++---
 .../analysis/CustomNormalizerProvider.java     | 11 ++-
 .../index/analysis/IndexAnalyzers.java         | 11 ++-
 .../index/mapper/KeywordFieldMapper.java       | 62 ++++++++++++++---
 .../index/search/MatchQuery.java               |  4 +-
 .../index/analysis/CustomNormalizerTests.java  | 14 +++-
 .../index/mapper/KeywordFieldMapperTests.java  | 57 ++++++++++++++++
 .../index/mapper/ParentFieldMapperTests.java   |  2 +-
 .../index/search/MultiMatchQueryTests.java     | 67 +++++++++++++++++++
 .../index/engine/TranslogHandler.java          |  2 +-
 12 files changed, 229 insertions(+), 31 deletions(-)

diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc
index c7b35d7315ea6..09d540feed19b 100644
--- a/docs/reference/mapping/types/keyword.asciidoc
+++ b/docs/reference/mapping/types/keyword.asciidoc
@@ -103,6 +103,12 @@ The following parameters are accepted by `keyword` fields:
     How to pre-process the keyword prior to indexing. Defaults to `null`,
     meaning the keyword is kept as-is.

+`split_queries_on_whitespace`::
+
+    Whether <> should split the input on whitespace
+    when building a query for this field.
+    Accepts `true` or `false` (default).
+
 NOTE: Indexes imported from 2.x do not support `keyword`. Instead they will
 attempt to downgrade `keyword` into `string`. This allows you to merge
 modern mappings with legacy mappings. Long lived indexes will have to be recreated

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index 14bd9f9b58d3a..1e4b8d91e8452 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -186,7 +186,7 @@ public Set<Entry<String, NamedAnalyzer>> entrySet() {
                 return Collections.emptySet();
             }
         };
-        try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
+        try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) {
             MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService,
                 mapperRegistry, () -> null);
             mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);

diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
index 4a949dd0f57e2..375932774273c 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java
@@ -20,6 +20,7 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -461,13 +462,16 @@ public IndexAnalyzers build(IndexSettings indexSettings,
         Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
         Map<String, NamedAnalyzer> analyzers = new HashMap<>();
         Map<String, NamedAnalyzer> normalizers = new HashMap<>();
+        Map<String, NamedAnalyzer> whitespaceNormalizers = new HashMap<>();
         for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
             processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers,
                 tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
         }
         for (Map.Entry<String, AnalyzerProvider<?>> entry : normalizerProviders.entrySet()) {
-            processNormalizerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), normalizers,
-                tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories);
+            processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers,
+                "keyword", tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories);
+            processNormalizerFactory(entry.getKey(), entry.getValue(), whitespaceNormalizers,
+                "whitespace", () -> new WhitespaceTokenizer(), tokenFilterFactoryFactories, charFilterFactoryFactories);
         }
         for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
             String key = entry.getKey();
@@ -514,7 +518,7 @@ public IndexAnalyzers build(IndexSettings indexSettings,
             }
         }
         return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer,
-            unmodifiableMap(analyzers), unmodifiableMap(normalizers));
+            unmodifiableMap(analyzers), unmodifiableMap(normalizers), unmodifiableMap(whitespaceNormalizers));
     }

     private void processAnalyzerFactory(DeprecationLogger deprecationLogger,
@@ -581,20 +585,20 @@ private void processAnalyzerFactory(DeprecationLogger deprecationLogger,
         }
     }

-    private void processNormalizerFactory(DeprecationLogger deprecationLogger,
-                                          IndexSettings indexSettings,
-                                          String name,
+    private void processNormalizerFactory(String name,
                                           AnalyzerProvider<?> normalizerFactory,
                                           Map<String, NamedAnalyzer> normalizers,
-                                          TokenizerFactory keywordTokenizerFactory,
+                                          String tokenizerName,
+                                          TokenizerFactory tokenizerFactory,
                                           Map<String, TokenFilterFactory> tokenFilters,
                                           Map<String, CharFilterFactory> charFilters) {
-        if (keywordTokenizerFactory == null) {
+
+        if (tokenizerFactory == null) {
             throw new IllegalStateException("keyword tokenizer factory is null, normalizers require analysis-common module");
         }

         if (normalizerFactory instanceof CustomNormalizerProvider) {
-            ((CustomNormalizerProvider) normalizerFactory).build(keywordTokenizerFactory, charFilters, tokenFilters);
+            ((CustomNormalizerProvider) normalizerFactory).build(tokenizerName, tokenizerFactory, charFilters, tokenFilters);
         }
         Analyzer normalizerF = normalizerFactory.get();
         if (normalizerF == null) {

diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java
index a0a7859d50cfd..13946be3a8d22 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/CustomNormalizerProvider.java
@@ -38,15 +38,14 @@ public final class CustomNormalizerProvider extends AbstractIndexAnalyzerProvide
     private CustomAnalyzer customAnalyzer;

     public CustomNormalizerProvider(IndexSettings indexSettings,
-                                    String name, Settings settings) {
+                                    String name, Settings settings) {
         super(indexSettings, name, settings);
        this.analyzerSettings = settings;
     }

-    public void build(final TokenizerFactory keywordTokenizerFactory, final Map<String, CharFilterFactory> charFilters,
+    public void build(final String tokenizerName, final TokenizerFactory tokenizerFactory, final Map<String, CharFilterFactory> charFilters,
                       final Map<String, TokenFilterFactory> tokenFilters) {
-        String tokenizerName = analyzerSettings.get("tokenizer");
-        if (tokenizerName != null) {
+        if (analyzerSettings.get("tokenizer") != null) {
            throw new IllegalArgumentException("Custom normalizer [" + name() + "] cannot configure a tokenizer");
         }
@@ -82,8 +81,8 @@ public void build(final TokenizerFactory keywordTokenizerFactory, final Map<St

diff --git a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java
--- a/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/IndexAnalyzers.java
     private final Map<String, NamedAnalyzer> analyzers;
     private final Map<String, NamedAnalyzer> normalizers;
+    private final Map<String, NamedAnalyzer> whitespaceNormalizers;
     private final IndexSettings indexSettings;

     public IndexAnalyzers(IndexSettings indexSettings, NamedAnalyzer defaultIndexAnalyzer, NamedAnalyzer defaultSearchAnalyzer,
                           NamedAnalyzer defaultSearchQuoteAnalyzer, Map<String, NamedAnalyzer> analyzers,
-                          Map<String, NamedAnalyzer> normalizers) {
+                          Map<String, NamedAnalyzer> normalizers, Map<String, NamedAnalyzer> whitespaceNormalizers) {
         super(indexSettings);
         this.defaultIndexAnalyzer = defaultIndexAnalyzer;
         this.defaultSearchAnalyzer = defaultSearchAnalyzer;
         this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer;
         this.analyzers = analyzers;
         this.normalizers = normalizers;
+        this.whitespaceNormalizers = whitespaceNormalizers;
         this.indexSettings = indexSettings;
     }

@@ -68,6 +70,13 @@ public NamedAnalyzer getNormalizer(String name) {
         return normalizers.get(name);
     }

+    /**
+     * Returns a normalizer that splits on whitespace mapped to the given name or null if not present
+     */
+    public NamedAnalyzer getWhitespaceNormalizer(String name) {
+        return whitespaceNormalizers.get(name);
+    }
+
     /**
      * Returns the default index analyzer for this index
      */

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
index b22dc0e51f926..de9283159038b 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.mapper;

 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedSetDocValuesField;
@@ -35,6 +36,8 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
@@ -73,6 +76,8 @@ public static class Builder extends FieldMapper.Builder<Builder, KeywordFieldM
@@ ... @@ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserCo
                     iterator.remove();
                 } else if (propName.equals("normalizer")) {
                     if (propNode != null) {
-                        NamedAnalyzer normalizer = parserContext.getIndexAnalyzers().getNormalizer(propNode.toString());
-                        if (normalizer == null) {
-                            throw new MapperParsingException("normalizer [" + propNode.toString() + "] not found for field [" + name + "]");
-                        }
-                        builder.normalizer(normalizer);
+                        builder.normalizer(parserContext.getIndexAnalyzers(), propNode.toString());
                     }
                     iterator.remove();
+                } else if (propName.equals("split_queries_on_whitespace")) {
+                    builder.splitQueriesOnWhitespace(XContentMapValues.nodeBooleanValue(propNode, "split_queries_on_whitespace"));
+                    iterator.remove();
                 }
             }
             return builder;
         }
@@ -163,6 +188,7 @@ public static final class KeywordFieldType extends StringFieldType {

         private NamedAnalyzer normalizer = null;
+        private boolean splitQueriesOnWhitespace;

         public KeywordFieldType() {
             setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
@@ -172,6 +198,7 @@ public KeywordFieldType() {
         protected KeywordFieldType(KeywordFieldType ref) {
             super(ref);
             this.normalizer = ref.normalizer;
+            this.splitQueriesOnWhitespace = ref.splitQueriesOnWhitespace;
         }

         public KeywordFieldType clone() {
@@ -183,7 +210,9 @@ public boolean equals(Object o) {
             if (super.equals(o) == false) {
                 return false;
             }
-            return Objects.equals(normalizer, ((KeywordFieldType) o).normalizer);
+            KeywordFieldType other = (KeywordFieldType) o;
+            return Objects.equals(normalizer, other.normalizer) &&
+                splitQueriesOnWhitespace == other.splitQueriesOnWhitespace;
         }

         @Override
@@ -197,7 +226,7 @@ public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts,

         @Override
         public int hashCode() {
-            return 31 * super.hashCode() + Objects.hashCode(normalizer);
+            return 31 * super.hashCode() + Objects.hash(normalizer, splitQueriesOnWhitespace);
         }

         @Override
@@ -214,6 +243,15 @@ public void setNormalizer(NamedAnalyzer normalizer) {
             this.normalizer = normalizer;
         }

+        public boolean splitQueriesOnWhitespace() {
+            return splitQueriesOnWhitespace;
+        }
+
+        public void setSplitQueriesOnWhitespace(boolean splitQueriesOnWhitespace) {
+            checkIfFrozen();
+            this.splitQueriesOnWhitespace = splitQueriesOnWhitespace;
+        }
+
         @Override
         public Query existsQuery(QueryShardContext context) {
             if (hasDocValues()) {
@@ -393,5 +431,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults,
         } else if (includeDefaults) {
             builder.nullField("normalizer");
         }
+
+        if (includeDefaults || fieldType().splitQueriesOnWhitespace) {
+            builder.field("split_queries_on_whitespace", fieldType().splitQueriesOnWhitespace);
+        }
     }
 }

diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
index 509d09bf2f3cb..052bc8d1b077f 100644
--- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
+++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java
@@ -54,6 +54,7 @@
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
+import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.support.QueryParsers;
@@ -264,7 +265,8 @@ public Query parse(Type type, String fieldName, Object value) throws IOException
          * passing through QueryBuilder.
          */
         boolean noForcedAnalyzer = this.analyzer == null;
-        if (fieldType.tokenized() == false && noForcedAnalyzer) {
+        if (fieldType.tokenized() == false && noForcedAnalyzer &&
+                fieldType instanceof KeywordFieldMapper.KeywordFieldType == false) {
             return blendTermQuery(new Term(fieldName, value.toString()), fieldType);
         }

diff --git a/server/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java
index e2025145241c0..0a1f625f20323 100644
--- a/server/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/analysis/CustomNormalizerTests.java
@@ -54,6 +54,12 @@ public void testBasics() throws IOException {
         assertEquals("my_normalizer", normalizer.name());
         assertTokenStreamContents(normalizer.tokenStream("foo", "Cet été-là"), new String[] {"cet été-là"});
         assertEquals(new BytesRef("cet été-là"), normalizer.normalize("foo", "Cet été-là"));
+
+        normalizer = analysis.indexAnalyzers.getWhitespaceNormalizer("my_normalizer");
+        assertNotNull(normalizer);
+        assertEquals("my_normalizer", normalizer.name());
+        assertTokenStreamContents(normalizer.tokenStream("foo", "Cet été-là"), new String[] {"cet", "été-là"});
+        assertEquals(new BytesRef("cet été-là"), normalizer.normalize("foo", "Cet été-là"));
     }

     public void testUnknownType() {
@@ -88,7 +94,13 @@ public void testCharFilters() throws IOException {
         NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
         assertNotNull(normalizer);
         assertEquals("my_normalizer", normalizer.name());
-        assertTokenStreamContents(normalizer.tokenStream("foo", "abc"), new String[] {"zbc"});
+        assertTokenStreamContents(normalizer.tokenStream("foo", "abc acd"), new String[] {"zbc zcd"});
         assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "abc"));
+
+        normalizer = analysis.indexAnalyzers.getWhitespaceNormalizer("my_normalizer");
+        assertNotNull(normalizer);
+        assertEquals("my_normalizer", normalizer.name());
+        assertTokenStreamContents(normalizer.tokenStream("foo", "abc acd"), new String[] {"zbc", "zcd"});
+        assertEquals(new BytesRef("zbc"), normalizer.normalize("foo", "abc"));
     }

diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
index bb3e87936db64..cbb4ed6130736 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
@@ -51,9 +51,11 @@
 import java.util.Map;

 import static java.util.Collections.singletonList;
+import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents;
 import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;

 public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
     /**
@@ -411,4 +413,59 @@ public void testEmptyName() throws IOException {
         );
         assertThat(e.getMessage(), containsString("name cannot be empty string"));
     }
+
+    public void testSplitQueriesOnWhitespace() throws IOException {
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject()
+            .startObject("type")
+                .startObject("properties")
+                    .startObject("field")
+                        .field("type", "keyword")
+                    .endObject()
+                    .startObject("field_with_normalizer")
+                        .field("type", "keyword")
+                        .field("normalizer", "my_lowercase")
+                        .field("split_queries_on_whitespace", true)
+                    .endObject()
+                .endObject()
+            .endObject().endObject());
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, true);
+
+        MappedFieldType fieldType = indexService.mapperService().fullName("field");
+        assertThat(fieldType, instanceOf(KeywordFieldMapper.KeywordFieldType.class));
+        KeywordFieldMapper.KeywordFieldType ft = (KeywordFieldMapper.KeywordFieldType) fieldType;
+        assertTokenStreamContents(ft.searchAnalyzer().analyzer().tokenStream("", "Hello World"), new String[] {"Hello World"});
+
+        fieldType = indexService.mapperService().fullName("field_with_normalizer");
+        assertThat(fieldType, instanceOf(KeywordFieldMapper.KeywordFieldType.class));
+        ft = (KeywordFieldMapper.KeywordFieldType) fieldType;
+        assertThat(ft.searchAnalyzer().name(), equalTo("my_lowercase"));
+        assertTokenStreamContents(ft.searchAnalyzer().analyzer().tokenStream("", "Hello World"), new String[] {"hello", "world"});
+
+        mapping = Strings.toString(XContentFactory.jsonBuilder().startObject()
+            .startObject("type")
+                .startObject("properties")
+                    .startObject("field")
+                        .field("type", "keyword")
+                        .field("split_queries_on_whitespace", true)
+                    .endObject()
+                    .startObject("field_with_normalizer")
+                        .field("type", "keyword")
+                        .field("normalizer", "my_lowercase")
+                        .field("split_queries_on_whitespace", false)
+                    .endObject()
+                .endObject()
+            .endObject().endObject());
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, true);
+
+        fieldType = indexService.mapperService().fullName("field");
+        assertThat(fieldType, instanceOf(KeywordFieldMapper.KeywordFieldType.class));
+        ft = (KeywordFieldMapper.KeywordFieldType) fieldType;
+        assertTokenStreamContents(ft.searchAnalyzer().analyzer().tokenStream("", "Hello World"), new String[] {"Hello", "World"});
+
+        fieldType = indexService.mapperService().fullName("field_with_normalizer");
+        assertThat(fieldType, instanceOf(KeywordFieldMapper.KeywordFieldType.class));
+        ft = (KeywordFieldMapper.KeywordFieldType) fieldType;
+        assertThat(ft.searchAnalyzer().name(), equalTo("my_lowercase"));
+        assertTokenStreamContents(ft.searchAnalyzer().analyzer().tokenStream("", "Hello World"), new String[] {"hello world"});
+    }
 }

diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
index c0b284f263fd8..23af6e749f506 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java
@@ -115,7 +115,7 @@ public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception {
         IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY);
         NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer());
         IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer,
-            Collections.emptyMap(), Collections.emptyMap());
+            Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
         SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
         MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry(), similarityService,
             new IndicesModule(emptyList()).getMapperRegistry(), () -> null);

diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
index 64c07267b77ca..2f1e6e64ff39d 100644
--- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
+++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryTests.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.analysis.MockSynonymAnalyzer;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.BlendedTermQuery;
+import org.apache.lucene.queryparser.xml.builders.DisjunctionMaxQueryBuilder;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
@@ -33,9 +34,12 @@
 import org.apache.lucene.search.SynonymQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.mapper.MapperService;
@@ -43,12 +47,16 @@
 import org.elasticsearch.index.query.MultiMatchQueryBuilder;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.search.MultiMatchQuery.FieldAndFieldType;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.elasticsearch.test.MockKeywordPlugin;
 import org.junit.Before;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -61,6 +69,11 @@
 public class MultiMatchQueryTests extends ESSingleNodeTestCase {

     private IndexService indexService;

+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Collections.singleton(MockKeywordPlugin.class);
+    }
+
     @Before
     public void setup() throws IOException {
         Settings settings = Settings.builder()
@@ -271,4 +284,58 @@ public void testMultiMatchCrossFieldsWithSynonymsPhrase() throws IOException {
             .build();
         assertEquals(expected, query);
     }
+
+    public void testKeywordSplitQueriesOnWhitespace() throws IOException {
+        IndexService indexService = createIndex("test_keyword", Settings.builder()
+            .put("index.analysis.normalizer.my_lowercase.type", "custom")
+            .putList("index.analysis.normalizer.my_lowercase.filter", "lowercase").build());
+        MapperService mapperService = indexService.mapperService();
+        String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject()
+            .startObject("type")
+                .startObject("properties")
+                    .startObject("field")
+                        .field("type", "keyword")
+                    .endObject()
+                    .startObject("field_normalizer")
+                        .field("type", "keyword")
+                        .field("normalizer", "my_lowercase")
+                    .endObject()
+                    .startObject("field_split")
+                        .field("type", "keyword")
+                        .field("split_queries_on_whitespace", true)
+                    .endObject()
+                    .startObject("field_split_normalizer")
+                        .field("type", "keyword")
+                        .field("normalizer", "my_lowercase")
+                        .field("split_queries_on_whitespace", true)
+                    .endObject()
+                .endObject()
+            .endObject().endObject());
+        mapperService.merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, true);
+        QueryShardContext queryShardContext = indexService.newQueryShardContext(
+            randomInt(20), null, () -> {
+                throw new UnsupportedOperationException();
+            }, null);
+        MultiMatchQuery parser = new MultiMatchQuery(queryShardContext);
+        Map<String, Float> fieldNames = new HashMap<>();
+        fieldNames.put("field", 1.0f);
+        fieldNames.put("field_split", 1.0f);
+        fieldNames.put("field_normalizer", 1.0f);
+        fieldNames.put("field_split_normalizer", 1.0f);
+        Query query = parser.parse(MultiMatchQueryBuilder.Type.BEST_FIELDS, fieldNames, "Foo Bar", null);
+        DisjunctionMaxQuery expected = new DisjunctionMaxQuery(
+            Arrays.asList(
+                new TermQuery(new Term("field_normalizer", "foo bar")),
+                new TermQuery(new Term("field", "Foo Bar")),
+                new BooleanQuery.Builder()
+                    .add(new TermQuery(new Term("field_split", "Foo")), BooleanClause.Occur.SHOULD)
+                    .add(new TermQuery(new Term("field_split", "Bar")), BooleanClause.Occur.SHOULD)
+                    .build(),
+                new BooleanQuery.Builder()
+                    .add(new TermQuery(new Term("field_split_normalizer", "foo")), BooleanClause.Occur.SHOULD)
+                    .add(new TermQuery(new Term("field_split_normalizer", "bar")), BooleanClause.Occur.SHOULD)
+                    .build()
+            ), 0.0f);
+        assertThat(query, equalTo(expected));
+    }
 }

diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java
index 649f0ea20aff6..5b603bce7a966 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java
@@ -61,7 +61,7 @@ long appliedOperations() {
     public TranslogHandler(NamedXContentRegistry xContentRegistry, IndexSettings indexSettings) {
         NamedAnalyzer defaultAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer());
         IndexAnalyzers indexAnalyzers =
-                new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, emptyMap(), emptyMap());
+                new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultAnalyzer, defaultAnalyzer, emptyMap(), emptyMap(), emptyMap());
         SimilarityService similarityService = new SimilarityService(indexSettings, null, emptyMap());
         MapperRegistry mapperRegistry = new IndicesModule(emptyList()).getMapperRegistry();
         mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry,

From f76ce43a60ec43ff9363c7030d4f8cefa705b9c2 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Fri, 1 Jun 2018 10:51:12 +0100
Subject: [PATCH 07/18] [ML][TEST] Fix bucket count assertion in all tests in
 ModelPlotsIT (#31026)

This fixes the last remaining test that was missed in #30717.
Closes #30715
---
 .../elasticsearch/xpack/ml/integration/ModelPlotsIT.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java
index 81a44cd133643..db854a6cc647b 100644
--- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java
+++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java
@@ -129,7 +129,11 @@ public void testByFieldWithTerms() throws Exception {
         startDatafeed(datafeedId, 0, System.currentTimeMillis());
         waitUntilJobIsClosed(job.getId());

-        assertThat(getBuckets(job.getId()).size(), equalTo(23));
+        // As the initial time is random, there's a chance the first record is
+        // aligned on a bucket start. Thus we check the buckets are in [23, 24]
+        assertThat(getBuckets(job.getId()).size(), greaterThanOrEqualTo(23));
+        assertThat(getBuckets(job.getId()).size(), lessThanOrEqualTo(24));
+
         Set<String> modelPlotTerms = modelPlotTerms(job.getId(), "by_field_value");
         assertThat(modelPlotTerms, containsInAnyOrder("user_2", "user_3"));
     }

From 4356671ff69f639df4b5f5ef4eb3a6fc9f0557f0 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Thu, 31 May 2018 16:05:09 +0200
Subject: [PATCH 08/18] Refactor Sniffer and make it testable (#29638)

This commit reworks the Sniffer component to simplify it and make it
possible to test it.

In particular, it no longer takes out the host that failed when sniffing
on failure, but rather relies on whatever the cluster returns. This is the
result of some valid comments from #27985. Taking out one single host is
too naive, hard to test and debug.

A new Scheduler abstraction is introduced to abstract the tasks scheduling
away and make it possible to plug in any test implementation and take out
timing aspects when testing.

Concurrency aspects have also been improved; synchronized methods are no
longer required. At the same time, we were able to take #27697 and #25701
into account and fix them, especially now that we can more easily add
tests.

Last but not least, unit tests are added for the Sniffer component, long
overdue.

Closes #27697
Closes #25701
---
 .../org/elasticsearch/client/RestClient.java  |  10 +-
 .../elasticsearch/client/RestClientTests.java |  32 +
 .../client/sniff/SniffOnFailureListener.java  |   3 +-
 .../elasticsearch/client/sniff/Sniffer.java   | 288 +++++---
 .../client/sniff/MockHostsSniffer.java        |   3 +-
 .../client/sniff/SnifferTests.java            | 656 ++++++++++++++++++
 .../exporter/http/HttpExporter.java           |   6 +-
 .../exporter/http/NodeFailureListener.java    |   2 +-
 .../http/NodeFailureListenerTests.java        |   4 +-
 9 files changed, 913 insertions(+), 91 deletions(-)
 create mode 100644 client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java

diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
index de78fd29da4e4..0e603c4069ae4 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -61,6 +61,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -132,7 +133,7 @@ public synchronized void setHosts(HttpHost... hosts) {
         if (hosts == null || hosts.length == 0) {
             throw new IllegalArgumentException("hosts must not be null nor empty");
         }
-        Set<HttpHost> httpHosts = new HashSet<>();
+        Set<HttpHost> httpHosts = new LinkedHashSet<>();
         AuthCache authCache = new BasicAuthCache();
         for (HttpHost host : hosts) {
             Objects.requireNonNull(host, "host cannot be null");
@@ -143,6 +144,13 @@ public synchronized void setHosts(HttpHost... hosts) {
         this.blacklist.clear();
     }

+    /**
+     * Returns the configured hosts
+     */
+    public List<HttpHost> getHosts() {
+        return new ArrayList<>(hostTuple.hosts);
+    }
+
     /**
      * Sends a request to the Elasticsearch cluster that the client points to.
      * Blocks until the request is completed and returns its response or fails

diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
index 15fa5c0f99596..5fe5fcae78fee 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java
@@ -25,6 +25,7 @@
 import java.io.IOException;
 import java.net.URI;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -251,6 +252,37 @@ public void testSetHostsWrongArguments() throws IOException {
         }
     }

+    public void testSetHostsPreservesOrdering() throws Exception {
+        try (RestClient restClient = createRestClient()) {
+            HttpHost[] hosts = randomHosts();
+            restClient.setHosts(hosts);
+            assertEquals(Arrays.asList(hosts), restClient.getHosts());
+        }
+    }
+
+    private static HttpHost[] randomHosts() {
+        int numHosts = randomIntBetween(1, 10);
+        HttpHost[] hosts = new HttpHost[numHosts];
+        for (int i = 0; i < hosts.length; i++) {
+            hosts[i] = new HttpHost("host-" + i, 9200);
+        }
+        return hosts;
+    }
+
+    public void testSetHostsDuplicatedHosts() throws Exception {
+        try (RestClient restClient = createRestClient()) {
+            int numHosts = randomIntBetween(1, 10);
+            HttpHost[] hosts = new HttpHost[numHosts];
+            HttpHost host = new HttpHost("host", 9200);
+            for (int i = 0; i < hosts.length; i++) {
+                hosts[i] = host;
+            }
+            restClient.setHosts(hosts);
+            assertEquals(1, restClient.getHosts().size());
+            assertEquals(host, restClient.getHosts().get(0));
+        }
+    }
+
     /**
      * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
     */

diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
index cbc77351de98b..41051555bae2c 100644
--- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
@@ -58,7 +58,6 @@ public void onFailure(HttpHost host) {
         if (sniffer == null) {
             throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
         }
-        //re-sniff immediately but take out the node that failed
-        sniffer.sniffOnFailure(host);
+        sniffer.sniffOnFailure();
     }
 }

diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
index c655babd9ed3d..dc873ccd44e10 100644
--- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
@@ -31,12 +31,14 @@
 import java.security.PrivilegedAction;
 import java.util.List;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;

 /**
  * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of
@@ -51,101 +53,175 @@ public class Sniffer implements Closeable {
     private static final Log logger = LogFactory.getLog(Sniffer.class);
     private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer";

-    private final Task task;
+    private final HostsSniffer hostsSniffer;
+    private final RestClient restClient;
+    private final long sniffIntervalMillis;
+    private final long sniffAfterFailureDelayMillis;
+    private final Scheduler scheduler;
+    private final AtomicBoolean initialized = new AtomicBoolean(false);
+    private volatile ScheduledTask nextScheduledTask;

     Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
-        this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay);
+        this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
+    }
+
+    Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
+        this.hostsSniffer = hostsSniffer;
+        this.restClient = restClient;
+        this.sniffIntervalMillis = sniffInterval;
+        this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay;
+        this.scheduler = scheduler;
+        /*
+         * The first sniffing round is async, so this constructor returns before nextScheduledTask is assigned to a task.
+         * The initialized flag is a protection against NPE due to that.
+         */
+        Task task = new Task(sniffIntervalMillis) {
+            @Override
+            public void run() {
+                super.run();
+                initialized.compareAndSet(false, true);
+            }
+        };
+        /*
+         * We do not keep track of the returned future as we never intend to cancel the initial sniffing round, we rather
+         * prevent any other operation from being executed till the sniffer is properly initialized
+         */
+        scheduler.schedule(task, 0L);
     }

     /**
-     * Triggers a new sniffing round and explicitly takes out the failed host provided as argument
+     * Schedule sniffing to run as soon as possible if it isn't already running. Once such a sniffing round runs
+     * it will also schedule a new round after sniffAfterFailureDelay ms.
      */
-    public void sniffOnFailure(HttpHost failedHost) {
-        this.task.sniffOnFailure(failedHost);
+    public void sniffOnFailure() {
+        //sniffOnFailure does nothing until the initial sniffing round has been completed
+        if (initialized.get()) {
+            /*
+             * If sniffing is already running, there is no point in scheduling another round right after the current one.
+             * Concurrent calls may be checking the same task state, but only the first skip call on the same task returns true.
+             * The task may also get replaced while we check its state, in which case calling skip on it returns false.
+             */
+            if (this.nextScheduledTask.skip()) {
+                /*
+                 * We do not keep track of this future as the task will immediately run and we don't intend to cancel it
+                 * due to concurrent sniffOnFailure runs. Effectively the previous (now cancelled or skipped) task will stay
+                 * assigned to nextTask till this onFailure round gets run and schedules its corresponding afterFailure round.
+                 */
+                scheduler.schedule(new Task(sniffAfterFailureDelayMillis), 0L);
+            }
+        }
     }

-    @Override
-    public void close() throws IOException {
-        task.shutdown();
+    enum TaskState {
+        WAITING, SKIPPED, STARTED
     }

-    private static class Task implements Runnable {
-        private final HostsSniffer hostsSniffer;
-        private final RestClient restClient;
-
-        private final long sniffIntervalMillis;
-        private final long sniffAfterFailureDelayMillis;
-        private final ScheduledExecutorService scheduledExecutorService;
-        private final AtomicBoolean running = new AtomicBoolean(false);
-        private ScheduledFuture<?> scheduledFuture;
-
-        private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) {
-            this.hostsSniffer = hostsSniffer;
-            this.restClient = restClient;
-            this.sniffIntervalMillis = sniffIntervalMillis;
-            this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
-            SnifferThreadFactory threadFactory = new SnifferThreadFactory(SNIFFER_THREAD_NAME);
-            this.scheduledExecutorService = Executors.newScheduledThreadPool(1, threadFactory);
-            scheduleNextRun(0);
-        }
-
-        synchronized void scheduleNextRun(long delayMillis) {
-            if (scheduledExecutorService.isShutdown() == false) {
-                try {
-                    if (scheduledFuture != null) {
-                        //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay
-                        this.scheduledFuture.cancel(false);
-                    }
-                    logger.debug("scheduling next sniff in " + delayMillis + " ms");
-                    this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS);
-                } catch(Exception e) {
-                    logger.error("error while scheduling next sniffer task", e);
-                }
-            }
+    class Task implements Runnable {
+        final long nextTaskDelay;
+        final AtomicReference<TaskState> taskState = new AtomicReference<>(TaskState.WAITING);
+
+        Task(long nextTaskDelay) {
+            this.nextTaskDelay = nextTaskDelay;
         }

         @Override
-    private static class Task implements Runnable {
-        private final HostsSniffer hostsSniffer;
-        private final RestClient restClient;
-
-        private final long sniffIntervalMillis;
-        private final long sniffAfterFailureDelayMillis;
-        private final ScheduledExecutorService scheduledExecutorService;
-        private final AtomicBoolean running = new AtomicBoolean(false);
-        private ScheduledFuture<?> scheduledFuture;
-
-        private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) {
-            this.hostsSniffer = hostsSniffer;
-            this.restClient = restClient;
-            this.sniffIntervalMillis = sniffIntervalMillis;
-            this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
-            SnifferThreadFactory threadFactory = new SnifferThreadFactory(SNIFFER_THREAD_NAME);
-            this.scheduledExecutorService = Executors.newScheduledThreadPool(1, threadFactory);
-            scheduleNextRun(0);
-        }
-
-        synchronized void scheduleNextRun(long delayMillis) {
-            if (scheduledExecutorService.isShutdown() == false) {
-                try {
-                    if (scheduledFuture != null) {
-                        //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay
-                        this.scheduledFuture.cancel(false);
-                    }
-                    logger.debug("scheduling next sniff in " + delayMillis + " ms");
-                    this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS);
-                } catch(Exception e) {
-                    logger.error("error while scheduling next sniffer task", e);
-                }
-            }
+    class Task implements Runnable {
+        final long nextTaskDelay;
+        final AtomicReference<TaskState> taskState = new AtomicReference<>(TaskState.WAITING);
+
+        Task(long nextTaskDelay) {
+            this.nextTaskDelay = nextTaskDelay;
         }

         @Override
         public void run() {
-            sniff(null, sniffIntervalMillis);
-        }
-
-        void sniffOnFailure(HttpHost failedHost) {
-            sniff(failedHost, sniffAfterFailureDelayMillis);
-        }
-
-        void sniff(HttpHost excludeHost, long nextSniffDelayMillis) {
-            if (running.compareAndSet(false, true)) {
-                try {
-                    List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
-                    logger.debug("sniffed hosts: " + sniffedHosts);
-                    if (excludeHost != null) {
-                        sniffedHosts.remove(excludeHost);
-                    }
-                    if (sniffedHosts.isEmpty()) {
-                        logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
-                    } else {
-                        this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
-                    }
-                } catch (Exception e) {
-                    logger.error("error while sniffing nodes", e);
-                } finally {
-                    scheduleNextRun(nextSniffDelayMillis);
-                    running.set(false);
-                }
+            /*
+             * Skipped or already started tasks do nothing. In most cases tasks will be cancelled and not run, but we want to protect
+             * against cases where future#cancel returns true yet the task runs. We want to make sure that such tasks do nothing,
+             * otherwise they will schedule another round at the end and so on, leaving us with multiple parallel sniffing "tracks",
+             * which is undesirable.
+             */
+            if (taskState.compareAndSet(TaskState.WAITING, TaskState.STARTED) == false) {
+                return;
             }
-        }
-
-        synchronized void shutdown() {
-            scheduledExecutorService.shutdown();
             try {
-                if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
-                    return;
-                }
-                scheduledExecutorService.shutdownNow();
-            } catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
+                sniff();
+            } catch (Exception e) {
+                logger.error("error while sniffing nodes", e);
+            } finally {
+                Task task = new Task(sniffIntervalMillis);
+                Future<?> future = scheduler.schedule(task, nextTaskDelay);
+                //tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable
+                ScheduledTask previousTask = nextScheduledTask;
+                nextScheduledTask = new ScheduledTask(task, future);
+                assert initialized.get() == false ||
+                        previousTask.task.isSkipped() || previousTask.task.hasStarted() : "the task that we are replacing was " +
+                        "neither skipped nor has it ever started";
             }
         }
+
+        /**
+         * Returns true if the task has started, false in case it didn't start (yet?) or it was skipped
+         */
+        boolean hasStarted() {
+            return taskState.get() == TaskState.STARTED;
+        }
+
+        /**
+         * Sets this task to be skipped. Returns true if the task will be skipped, false if the task has already started.
+         */
+        boolean skip() {
+            /*
+             * Threads may still get run although future#cancel returns true. We make sure that a task is either cancelled (or skipped),
+             * or entirely run. In the odd case that future#cancel returns true and the thread still runs, the task won't do anything.
+             * In case future#cancel returns true but the task has already started, this state change will not succeed, hence this
+             * method returns false and the task will run normally.
+             */
+            return taskState.compareAndSet(TaskState.WAITING, TaskState.SKIPPED);
+        }
+
+        /**
+         * Returns true if the task was set to be skipped before it was started
+         */
+        boolean isSkipped() {
+            return taskState.get() == TaskState.SKIPPED;
+        }
+    }
+
+    static final class ScheduledTask {
+        final Task task;
+        final Future<?> future;
+
+        ScheduledTask(Task task, Future<?> future) {
+            this.task = task;
+            this.future = future;
+        }
+        /**
+         * Cancels this task. Returns true if the task has been successfully cancelled, meaning it won't be executed
+         * or, if it is, its execution won't have any effect. Returns false if the task cannot be cancelled (possibly it was
+         * already cancelled or already completed).
+         */
+        boolean skip() {
+            /*
+             * Future#cancel should return false whenever a task cannot be cancelled, most likely as it has already started. We don't
+             * trust it much though so we try to cancel hoping that it will work. At the same time we always call skip too, which means
+             * that if the task has already started the state change will fail. We could potentially not call skip when cancel returns
+             * false but we prefer to stay on the safe side.
+             */
+            future.cancel(false);
+            return task.skip();
+        }
+    }
+
+    final void sniff() throws IOException {
+        List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
+        if (logger.isDebugEnabled()) {
+            logger.debug("sniffed hosts: " + sniffedHosts);
+        }
+        if (sniffedHosts.isEmpty()) {
+            logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
+        } else {
+            restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
+        }
+    }
+
+    @Override
+    public void close() {
+        if (initialized.get()) {
+            nextScheduledTask.skip();
+        }
+        this.scheduler.shutdown();
     }

     /**
@@ -158,8 +234,62 @@ public static SnifferBuilder builder(RestClient restClient) {
         return new SnifferBuilder(restClient);
     }

-    private static class SnifferThreadFactory implements ThreadFactory {
+    /**
+     * The Scheduler interface makes it possible to isolate the scheduling aspects of sniffing, so that the
+     * sniffer can be tested by injecting, when needed, a custom scheduler that is better suited for testing.
+     */
+    interface Scheduler {
+        /**
+         * Schedules the provided {@link Runnable} to be executed after delayMillis milliseconds
+         */
+        Future<?> schedule(Task task, long delayMillis);
+
+        /**
+         * Shuts this scheduler down
+         */
+        void shutdown();
+    }
+
+    /**
+     * Default implementation of {@link Scheduler}, based on {@link ScheduledExecutorService}
+     */
+    static final class DefaultScheduler implements Scheduler {
+        final ScheduledExecutorService executor;
+
+        DefaultScheduler() {
+            this(initScheduledExecutorService());
+        }
+
+        DefaultScheduler(ScheduledExecutorService executor) {
+            this.executor = executor;
+        }
+
+        private static ScheduledExecutorService initScheduledExecutorService() {
+            ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new SnifferThreadFactory(SNIFFER_THREAD_NAME));
+            executor.setRemoveOnCancelPolicy(true);
+            return executor;
+        }
+
+        @Override
+        public Future<?> schedule(Task task, long delayMillis) {
+            return executor.schedule(task, delayMillis, TimeUnit.MILLISECONDS);
+        }
+
+        @Override
+        public void shutdown() {
+            executor.shutdown();
+            try {
+                if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
+                    return;
+                }
+                executor.shutdownNow();
+            } catch (InterruptedException ignore) {
+                Thread.currentThread().interrupt();
+            }
+        }
+    }

+    static class SnifferThreadFactory implements ThreadFactory {
         private final AtomicInteger threadNumber = new AtomicInteger(1);
         private final String namePrefix;
         private final ThreadFactory originalThreadFactory;
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
index 5a52151d76e01..7550459e9ea50 100644
--- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
+++
b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java @@ -21,7 +21,6 @@ import org.apache.http.HttpHost; -import java.io.IOException; import java.util.Collections; import java.util.List; @@ -30,7 +29,7 @@ */ class MockHostsSniffer implements HostsSniffer { @Override - public List sniffHosts() throws IOException { + public List sniffHosts() { return Collections.singletonList(new HttpHost("localhost", 9200)); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java new file mode 100644 index 0000000000000..8172774a77d80 --- /dev/null +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -0,0 +1,656 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestClientTestCase; +import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler; +import org.elasticsearch.client.sniff.Sniffer.Scheduler; +import org.mockito.Matchers; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; 
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
+public class SnifferTests extends RestClientTestCase {
+
+    /**
+     * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation
+     * to retrieve nodes and set them (when not empty) on the provided {@link RestClient} instance.
+     */
+    public void testSniff() throws IOException {
+        HttpHost initialHost = new HttpHost("localhost", 9200);
+        try (RestClient restClient = RestClient.builder(initialHost).build()) {
+            Scheduler noOpScheduler = new Scheduler() {
+                @Override
+                public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                    return mock(Future.class);
+                }
+
+                @Override
+                public void shutdown() {
+                }
+            };
+            CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+            int iters = randomIntBetween(5, 30);
+            try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)) {
+                {
+                    assertEquals(1, restClient.getHosts().size());
+                    HttpHost httpHost = restClient.getHosts().get(0);
+                    assertEquals("localhost", httpHost.getHostName());
+                    assertEquals(9200, httpHost.getPort());
+                }
+                int emptyList = 0;
+                int failures = 0;
+                int runs = 0;
+                List<HttpHost> lastHosts = Collections.singletonList(initialHost);
+                for (int i = 0; i < iters; i++) {
+                    try {
+                        runs++;
+                        sniffer.sniff();
+                        if (hostsSniffer.failures.get() > failures) {
+                            failures++;
+                            fail("should have failed given that hostsSniffer says it threw an exception");
+                        } else if (hostsSniffer.emptyList.get() > emptyList) {
+                            emptyList++;
+                            assertEquals(lastHosts, restClient.getHosts());
+                        } else {
+                            assertNotEquals(lastHosts, restClient.getHosts());
+                            List<HttpHost> expectedHosts = CountingHostsSniffer.buildHosts(runs);
+                            assertEquals(expectedHosts, restClient.getHosts());
+                            lastHosts = restClient.getHosts();
+                        }
+                    } catch(IOException e) {
+                        if (hostsSniffer.failures.get() > failures) {
+                            failures++;
+                            assertEquals("communication breakdown", e.getMessage());
+                        }
+                    }
+                }
+                assertEquals(hostsSniffer.emptyList.get(), emptyList);
+                assertEquals(hostsSniffer.failures.get(), failures);
+                assertEquals(hostsSniffer.runs.get(), runs);
+            }
+        }
+    }
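The "submit rather than schedule" trick that the next test relies on boils down to a scheduler that ignores the requested delay. A hypothetical sketch of such a scheduler (not part of this patch; executor stands for an assumed single-threaded ExecutorService):

    Scheduler immediateScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            //run right away: delayMillis is only asserted on, never waited for
            return executor.submit(task);
        }

        @Override
        public void shutdown() {
        }
    };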
+
+    /**
+     * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
+     * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
+     * The {@link CountingHostsSniffer} doesn't make any network connection but may throw an exception or return no hosts, which
+     * makes it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
+     * The {@link Scheduler} implementation submits tasks rather than scheduling them, meaning that it doesn't respect the
+     * requested sniff delays, while still allowing us to assert that the delay requested for each run and the following one
+     * matches the expected values.
+     */
+    public void testOrdinarySniffRounds() throws Exception {
+        final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
+        long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
+        RestClient restClient = mock(RestClient.class);
+        CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+        final int iters = randomIntBetween(30, 100);
+        final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
+        final CountDownLatch completionLatch = new CountDownLatch(1);
+        final AtomicInteger runs = new AtomicInteger(iters);
+        final ExecutorService executor = Executors.newSingleThreadExecutor();
+        final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
+        final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
+        Scheduler scheduler = new Scheduler() {
+            @Override
+            public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                assertEquals(sniffInterval, task.nextTaskDelay);
+                int numberOfRuns = runs.getAndDecrement();
+                if (numberOfRuns == iters) {
+                    //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
+                    assertEquals(0L, delayMillis);
+                    assertEquals(sniffInterval, task.nextTaskDelay);
+                } else {
+                    //all of the subsequent times "schedule" is called with delay set to the configured sniff interval
+                    assertEquals(sniffInterval, delayMillis);
+                    assertEquals(sniffInterval, task.nextTaskDelay);
+                    if (numberOfRuns == 0) {
+                        completionLatch.countDown();
+                        return null;
+                    }
+                }
+                //we submit rather than scheduling to make the test quick and not depend on time
+                Future<?> future = executor.submit(task);
+                futures.add(future);
+                if (numberOfRuns == 1) {
+                    lastFuture.set(future);
+                    lastTask.set(task);
+                }
+                return future;
+            }
+
+            @Override
+            public void shutdown() {
+                //the executor is closed externally, shutdown is tested separately
+            }
+        };
+        try {
+            new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+            assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
+            assertEquals(iters, futures.size());
+            //the last future is the only one that may not be completed yet, as the count down happens
+            //while scheduling the next round which is still part of the execution of the runnable itself.
+            assertTrue(lastTask.get().hasStarted());
+            lastFuture.get().get();
+            for (Future<?> future : futures) {
+                assertTrue(future.isDone());
+                future.get();
+            }
+        } finally {
+            executor.shutdown();
+            assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
+        }
+        int totalRuns = hostsSniffer.runs.get();
+        assertEquals(iters, totalRuns);
+        int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
+        verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg());
+        verifyNoMoreInteractions(restClient);
+    }
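In application code, the lifecycle that the close test below exercises corresponds to the usual builder-based usage of the sniffer. A rough sketch, with arbitrary example values:

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
    Sniffer sniffer = Sniffer.builder(restClient)
            .setSniffIntervalMillis(60000)
            .build();
    // ... use restClient ...
    //close() skips the next scheduled round and shuts the scheduler down; calling it again is a no-op
    sniffer.close();
    restClient.close();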
+
+    /**
+     * Test that {@link Sniffer#close()} shuts down the underlying {@link Scheduler}, and that such calls are idempotent.
+     * Also verifies that the next scheduled round gets cancelled.
+     */
+    public void testClose() {
+        final Future<?> future = mock(Future.class);
+        long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
+        long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
+        RestClient restClient = mock(RestClient.class);
+        final AtomicInteger shutdown = new AtomicInteger(0);
+        final AtomicBoolean initialized = new AtomicBoolean(false);
+        Scheduler scheduler = new Scheduler() {
+            @Override
+            public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                if (initialized.compareAndSet(false, true)) {
+                    //run from the same thread so the sniffer is guaranteed to be initialized and the scheduled task cancelled on close
+                    task.run();
+                }
+                return future;
+            }
+
+            @Override
+            public void shutdown() {
+                shutdown.incrementAndGet();
+            }
+        };
+
+        Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
+        assertEquals(0, shutdown.get());
+        int iters = randomIntBetween(3, 10);
+        for (int i = 1; i <= iters; i++) {
+            sniffer.close();
+            verify(future, times(i)).cancel(false);
+            assertEquals(i, shutdown.get());
+        }
+    }
+
+    public void testSniffOnFailureNotInitialized() {
+        RestClient restClient = mock(RestClient.class);
+        CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+        long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
+        long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
+        final AtomicInteger scheduleCalls = new AtomicInteger(0);
+        Scheduler scheduler = new Scheduler() {
+            @Override
+            public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                scheduleCalls.incrementAndGet();
+                return null;
+            }
+
+            @Override
+            public void shutdown() {
+            }
+        };
+
+        Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+        for (int i = 0; i < 10; i++) {
+            sniffer.sniffOnFailure();
+        }
+        assertEquals(1, scheduleCalls.get());
+        int totalRuns = hostsSniffer.runs.get();
+        assertEquals(0, totalRuns);
+        int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
+        verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg());
+        verifyNoMoreInteractions(restClient);
+    }
+
+    /**
+     * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
+     * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
+     * (either afterFailure or ordinary) gets cancelled.
+ */ + public void testSniffOnFailure() throws Exception { + RestClient restClient = mock(RestClient.class); + CountingHostsSniffer hostsSniffer = new CountingHostsSniffer(); + final AtomicBoolean initializing = new AtomicBoolean(true); + final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE); + final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE); + int minNumOnFailureRounds = randomIntBetween(5, 10); + final CountDownLatch initializingLatch = new CountDownLatch(1); + final Set ordinaryRoundsTasks = new CopyOnWriteArraySet<>(); + final AtomicReference> initializingFuture = new AtomicReference<>(); + final Set onFailureTasks = new CopyOnWriteArraySet<>(); + final Set afterFailureTasks = new CopyOnWriteArraySet<>(); + final AtomicBoolean onFailureCompleted = new AtomicBoolean(false); + final CountDownLatch completionLatch = new CountDownLatch(1); + final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + Scheduler scheduler = new Scheduler() { + @Override + public Future schedule(final Sniffer.Task task, long delayMillis) { + if (initializing.compareAndSet(true, false)) { + assertEquals(0L, delayMillis); + Future future = executor.submit(new Runnable() { + @Override + public void run() { + try { + task.run(); + } finally { + //we need to make sure that the sniffer is initialized, so the sniffOnFailure + //call does what it needs to do. Otherwise nothing happens until initialized. + initializingLatch.countDown(); + } + } + }); + assertTrue(initializingFuture.compareAndSet(null, future)); + return future; + } + if (delayMillis == 0L) { + Future future = executor.submit(task); + onFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + if (delayMillis == sniffAfterFailureDelay) { + Future future = scheduleOrSubmit(task); + afterFailureTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + assertEquals(sniffInterval, delayMillis); + assertEquals(sniffInterval, task.nextTaskDelay); + + if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) { + completionLatch.countDown(); + return mock(Future.class); + } + + Future future = scheduleOrSubmit(task); + ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future)); + return future; + } + + private Future scheduleOrSubmit(Sniffer.Task task) { + if (randomBoolean()) { + return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS); + } else { + return executor.submit(task); + } + } + + @Override + public void shutdown() { + } + }; + final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay); + assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS)); + + ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20)); + Set> onFailureFutures = new CopyOnWriteArraySet<>(); + try { + //with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped + //as another round is already running. We retry till enough runs get through as that's what we want to test. 
+ while (onFailureTasks.size() < minNumOnFailureRounds) { + onFailureFutures.add(onFailureExecutor.submit(new Runnable() { + @Override + public void run() { + sniffer.sniffOnFailure(); + } + })); + } + assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + for (Future onFailureFuture : onFailureFutures) { + assertNull(onFailureFuture.get()); + } + onFailureCompleted.set(true); + } finally { + onFailureExecutor.shutdown(); + onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS); + } + + assertFalse(initializingFuture.get().isCancelled()); + assertTrue(initializingFuture.get().isDone()); + assertNull(initializingFuture.get().get()); + + assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS)); + assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds)); + assertEquals(onFailureTasks.size(), afterFailureTasks.size()); + + for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) { + assertFalse(onFailureTask.future.isCancelled()); + assertTrue(onFailureTask.future.isDone()); + assertNull(onFailureTask.future.get()); + assertTrue(onFailureTask.task.hasStarted()); + assertFalse(onFailureTask.task.isSkipped()); + } + + int cancelledTasks = 0; + int completedTasks = onFailureTasks.size() + 1; + for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) { + if (assertTaskCancelledOrCompleted(afterFailureTask)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + + assertThat(ordinaryRoundsTasks.size(), greaterThan(0)); + for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) { + if (assertTaskCancelledOrCompleted(task)) { + completedTasks++; + } else { + cancelledTasks++; + } + } + assertEquals(onFailureTasks.size(), cancelledTasks); + + assertEquals(completedTasks, hostsSniffer.runs.get()); + int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get(); + verify(restClient, times(setHostsRuns)).setHosts(Matchers.anyVararg()); + verifyNoMoreInteractions(restClient); + } finally { + executor.shutdown(); + executor.awaitTermination(1000L, TimeUnit.MILLISECONDS); + } + } + + private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task) throws ExecutionException, InterruptedException { + if (task.task.isSkipped()) { + assertTrue(task.future.isCancelled()); + try { + task.future.get(); + fail("cancellation exception should have been thrown"); + } catch(CancellationException ignore) { + } + return false; + } else { + try { + assertNull(task.future.get()); + } catch(CancellationException ignore) { + assertTrue(task.future.isCancelled()); + } + assertTrue(task.future.isDone()); + assertTrue(task.task.hasStarted()); + return true; + } + } + + public void testTaskCancelling() throws Exception { + RestClient restClient = mock(RestClient.class); + HostsSniffer hostsSniffer = mock(HostsSniffer.class); + Scheduler noOpScheduler = new Scheduler() { + @Override + public Future schedule(Sniffer.Task task, long delayMillis) { + return null; + } + + @Override + public void shutdown() { + } + }; + Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L); + ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(); + try { + int numIters = randomIntBetween(50, 100); + for (int i = 0; i < numIters; i++) { + Sniffer.Task task = sniffer.new Task(0L); + TaskWrapper wrapper = new TaskWrapper(task); + Future future; + if (rarely()) { + future = executor.schedule(wrapper, 
randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
+                } else {
+                    future = executor.submit(wrapper);
+                }
+                Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future);
+                boolean skip = scheduledTask.skip();
+                try {
+                    assertNull(future.get());
+                } catch(CancellationException ignore) {
+                    assertTrue(future.isCancelled());
+                }
+
+                if (skip) {
+                    //the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel),
+                    //or skipped, in which case it will run but do nothing (thanks to Task#skip).
+                    //Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything,
+                    //otherwise we may end up with parallel sniffing tracks given that each task schedules the following one. We need
+                    //to make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on.
+                    assertFalse(task.hasStarted());
+                    assertTrue(task.isSkipped());
+                    assertTrue(future.isCancelled());
+                    assertTrue(future.isDone());
+                } else {
+                    //if a future is cancelled when its execution has already started, future#get throws CancellationException before
+                    //completion. The execution continues though, so we use a latch to try and wait for the task to be completed.
+                    //Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may fail
+                    //to schedule the following round, which means no sniffing will ever happen again besides on failure sniffing.
+                    assertTrue(wrapper.await());
+                    //the future may or may not be cancelled but the task has for sure started and completed
+                    assertTrue(task.toString(), task.hasStarted());
+                    assertFalse(task.isSkipped());
+                    assertTrue(future.isDone());
+                }
+                //subsequent cancel calls return false for sure
+                int cancelCalls = randomIntBetween(1, 10);
+                for (int j = 0; j < cancelCalls; j++) {
+                    assertFalse(scheduledTask.skip());
+                }
+            }
+        } finally {
+            executor.shutdown();
+            executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    /**
+     * Wraps a {@link Sniffer.Task} and makes it possible to wait for its completion. This is needed to verify
+     * that tasks are either never started or always completed. Calling {@link Future#get()} against a cancelled future will
+     * throw {@link CancellationException} straight-away, but the execution of the task will continue if it had already started,
+     * in which case {@link Future#cancel(boolean)} returns true, which is not very helpful.
+     */
+    private static final class TaskWrapper implements Runnable {
+        final Sniffer.Task task;
+        final CountDownLatch completionLatch = new CountDownLatch(1);
+
+        TaskWrapper(Sniffer.Task task) {
+            this.task = task;
+        }
+
+        @Override
+        public void run() {
+            try {
+                task.run();
+            } finally {
+                completionLatch.countDown();
+            }
+        }
+
+        boolean await() throws InterruptedException {
+            return completionLatch.await(1000, TimeUnit.MILLISECONDS);
+        }
+    }
+
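Outside of tests, a custom HostsSniffer such as the counting implementation below would be plugged in through the builder rather than by constructing Sniffer directly. A hedged sketch, assuming the SnifferBuilder#setHostsSniffer setter:

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
    Sniffer sniffer = Sniffer.builder(restClient)
            .setHostsSniffer(new MockHostsSniffer())    //any HostsSniffer implementation works here
            .build();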
+    /**
+     * Mock {@link HostsSniffer} implementation used for testing, which most of the time returns a fixed host.
+     * It rarely throws an exception or returns an empty list of hosts, to make sure that such situations are properly handled.
+     * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be executed
+     * at a given point in time.
+     */
+    private static class CountingHostsSniffer implements HostsSniffer {
+        private final AtomicInteger runs = new AtomicInteger(0);
+        private final AtomicInteger failures = new AtomicInteger(0);
+        private final AtomicInteger emptyList = new AtomicInteger(0);
+
+        @Override
+        public List<HttpHost> sniffHosts() throws IOException {
+            int run = runs.incrementAndGet();
+            if (rarely()) {
+                failures.incrementAndGet();
+                //check that if communication breaks, the sniffer keeps on working
+                throw new IOException("communication breakdown");
+            }
+            if (rarely()) {
+                emptyList.incrementAndGet();
+                return Collections.emptyList();
+            }
+            return buildHosts(run);
+        }
+
+        private static List<HttpHost> buildHosts(int run) {
+            int size = run % 5 + 1;
+            assert size > 0;
+            List<HttpHost> hosts = new ArrayList<>(size);
+            for (int i = 0; i < size; i++) {
+                hosts.add(new HttpHost("sniffed-" + run, 9200 + i));
+            }
+            return hosts;
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testDefaultSchedulerSchedule() {
+        RestClient restClient = mock(RestClient.class);
+        HostsSniffer hostsSniffer = mock(HostsSniffer.class);
+        Scheduler noOpScheduler = new Scheduler() {
+            @Override
+            public Future<?> schedule(Sniffer.Task task, long delayMillis) {
+                return mock(Future.class);
+            }
+
+            @Override
+            public void shutdown() {
+            }
+        };
+        Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
+        Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE));
+
+        ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
+        final ScheduledFuture<?> mockedFuture = mock(ScheduledFuture.class);
+        when(scheduledExecutorService.schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class)))
+                .then(new Answer<ScheduledFuture<?>>() {
+                    @Override
+                    public ScheduledFuture<?> answer(InvocationOnMock invocationOnMock) {
+                        return mockedFuture;
+                    }
+                });
+        DefaultScheduler scheduler = new DefaultScheduler(scheduledExecutorService);
+        long delay = randomLongBetween(1, Long.MAX_VALUE);
+        Future<?> future = scheduler.schedule(task, delay);
+        assertSame(mockedFuture, future);
+        verify(scheduledExecutorService).schedule(task, delay, TimeUnit.MILLISECONDS);
+        verifyNoMoreInteractions(scheduledExecutorService, mockedFuture);
+    }
+
+    public void testDefaultSchedulerThreadFactory() {
+        DefaultScheduler defaultScheduler = new DefaultScheduler();
+        try {
+            ScheduledExecutorService executorService = defaultScheduler.executor;
+            assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class));
+            ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) executorService;
+            assertTrue(executor.getRemoveOnCancelPolicy());
+            assertFalse(executor.getContinueExistingPeriodicTasksAfterShutdownPolicy());
+            assertTrue(executor.getExecuteExistingDelayedTasksAfterShutdownPolicy());
+            assertThat(executor.getThreadFactory(), instanceOf(Sniffer.SnifferThreadFactory.class));
+            int iters = randomIntBetween(3, 10);
+            for (int i = 1; i <= iters; i++) {
+                Thread thread = executor.getThreadFactory().newThread(new Runnable() {
+                    @Override
+                    public void run() {
+                    }
+                });
+                assertThat(thread.getName(), equalTo("es_rest_client_sniffer[T#" + i + "]"));
+                assertThat(thread.isDaemon(), is(true));
+            }
+        } finally {
+            defaultScheduler.shutdown();
+        }
+    }
+
+    public void testDefaultSchedulerShutdown() throws Exception {
+        ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class);
+        DefaultScheduler defaultScheduler = new DefaultScheduler(executor);
+
defaultScheduler.shutdown(); + verify(executor).shutdown(); + verify(executor).awaitTermination(1000, TimeUnit.MILLISECONDS); + verify(executor).shutdownNow(); + verifyNoMoreInteractions(executor); + + when(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)).thenReturn(true); + defaultScheduler.shutdown(); + verify(executor, times(2)).shutdown(); + verify(executor, times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS); + verifyNoMoreInteractions(executor); + } +} diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java index e91fbe6e7dfdc..af0c543678048 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporter.java @@ -41,8 +41,6 @@ import org.joda.time.format.DateTimeFormatter; import javax.net.ssl.SSLContext; - -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -658,12 +656,12 @@ public void doClose() { if (sniffer != null) { sniffer.close(); } - } catch (IOException | RuntimeException e) { + } catch (Exception e) { logger.error("an error occurred while closing the internal client sniffer", e); } finally { try { client.close(); - } catch (IOException | RuntimeException e) { + } catch (Exception e) { logger.error("an error occurred while closing the internal client", e); } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java index 6590232fda1ff..92febdf3561f8 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListener.java @@ -86,7 +86,7 @@ public void onFailure(final HttpHost host) { resource.markDirty(); } if (sniffer != null) { - sniffer.sniffOnFailure(host); + sniffer.sniffOnFailure(); } } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java index f1ecb799406e8..08512e82e145d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/NodeFailureListenerTests.java @@ -46,7 +46,7 @@ public void testSnifferNotifiedOnFailure() { listener.onFailure(host); - verify(sniffer).sniffOnFailure(host); + verify(sniffer).sniffOnFailure(); } public void testResourceNotifiedOnFailure() { @@ -71,7 +71,7 @@ public void testResourceAndSnifferNotifiedOnFailure() { } if (optionalSniffer != null) { - verify(sniffer).sniffOnFailure(host); + verify(sniffer).sniffOnFailure(); } } From 50ddd7d9780f877abe88e01e1b71a46d1c465aca Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 1 Jun 2018 08:53:24 +0200 Subject: [PATCH 09/18] High-level client: list tasks failure to not lose nodeId (#31001) This commit reworks testing for `ListTasksResponse` so that random fields insertion can be tested 
and xcontent equivalence can be checked too. Proper exclusions need to be configured, and failures need to be tested
separately. This helped find a small problem: whenever a node failure is returned, the nodeId was lost, as it was never
printed out as part of the exception toXContent.
---
 .../elasticsearch/ElasticsearchException.java |   3 +-
 .../action/FailedNodeException.java           |   6 +
 .../node/tasks/list/ListTasksResponse.java    |   2 +-
 .../org/elasticsearch/tasks/TaskInfo.java     |   3 +-
 .../tasks/ListTasksResponseTests.java         | 117 ++++++++++++++----
 .../elasticsearch/tasks/TaskInfoTests.java    |   3 +-
 6 files changed, 102 insertions(+), 32 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index 89f7aa7cfdb34..e0234d70bdc9c 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -269,7 +269,6 @@ public String getDetailedMessage() {
         }
     }

-
     /**
      * Retrieve the innermost cause of this exception, if none, returns the current exception.
      */
@@ -292,7 +291,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
             out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);
         } else {
-            HashMap<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());
+            Map<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());
             finalHeaders.putAll(headers);
             finalHeaders.putAll(metadata);
             out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString);
diff --git a/server/src/main/java/org/elasticsearch/action/FailedNodeException.java b/server/src/main/java/org/elasticsearch/action/FailedNodeException.java
index bf9aad0d39ebe..475de7eae5faf 100644
--- a/server/src/main/java/org/elasticsearch/action/FailedNodeException.java
+++ b/server/src/main/java/org/elasticsearch/action/FailedNodeException.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;

 import java.io.IOException;

@@ -48,4 +49,9 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalString(nodeId);
     }
+
+    @Override
+    protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("node_id", nodeId);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
index 1233b7143ab77..53d80853328b2 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
@@ -266,6 +266,6 @@ public static ListTasksResponse fromXContent(XContentParser parser) {
     @Override
     public String toString() {
-        return Strings.toString(this);
+        return Strings.toString(this, true, true);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index 26aabec3e9fc2..d01b8cfaa3cdf 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++
b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParserHelper; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -259,7 +258,7 @@ public static TaskInfo fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(this); + return Strings.toString(this, true, true); } // Implements equals and hashCode for testing diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index 295ff955e41a5..b280446db1c74 100644 --- a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ -23,29 +23,29 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.net.ConnectException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Objects; +import java.util.function.Predicate; +import java.util.function.Supplier; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.instanceOf; public class ListTasksResponseTests extends AbstractXContentTestCase { public void testEmptyToString() { - assertEquals("{\"tasks\":[]}", new ListTasksResponse().toString()); + assertEquals("{\n" + + " \"tasks\" : [ ]\n" + + "}", new ListTasksResponse().toString()); } public void testNonEmptyToString() { @@ -53,48 +53,113 @@ public void testNonEmptyToString() { new TaskId("node1", 1), "dummy-type", "dummy-action", "dummy-description", null, 0, 1, true, new TaskId("node1", 0), Collections.singletonMap("foo", "bar")); ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList()); - assertEquals("{\"tasks\":[{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\"," - + "\"description\":\"dummy-description\",\"start_time_in_millis\":0,\"running_time_in_nanos\":1,\"cancellable\":true," - + "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}]}", tasksResponse.toString()); + assertEquals("{\n" + + " \"tasks\" : [\n" + + " {\n" + + " \"node\" : \"node1\",\n" + + " \"id\" : 1,\n" + + " \"type\" : \"dummy-type\",\n" + + " \"action\" : \"dummy-action\",\n" + + " \"description\" : \"dummy-description\",\n" + + " \"start_time\" : \"1970-01-01T00:00:00.000Z\",\n" + + " \"start_time_in_millis\" : 0,\n" + + " \"running_time\" : 
\"1nanos\",\n" + + " \"running_time_in_nanos\" : 1,\n" + + " \"cancellable\" : true,\n" + + " \"parent_task_id\" : \"node1:0\",\n" + + " \"headers\" : {\n" + + " \"foo\" : \"bar\"\n" + + " }\n" + + " }\n" + + " ]\n" + + "}", tasksResponse.toString()); } @Override protected ListTasksResponse createTestInstance() { + //failures are tested separately, so we can test xcontent equivalence at least when we have no failures + return new ListTasksResponse(randomTasks(), Collections.emptyList(), Collections.emptyList()); + } + + private static List randomTasks() { List tasks = new ArrayList<>(); for (int i = 0; i < randomInt(10); i++) { tasks.add(TaskInfoTests.randomTaskInfo()); } - List taskFailures = new ArrayList<>(); - for (int i = 0; i < randomInt(5); i++) { - taskFailures.add(new TaskOperationFailure( - randomAlphaOfLength(5), randomNonNegativeLong(), new IllegalStateException("message"))); - } - return new ListTasksResponse(tasks, taskFailures, Collections.singletonList(new FailedNodeException("", "message", null))); + return tasks; } @Override - protected ListTasksResponse doParseInstance(XContentParser parser) throws IOException { + protected ListTasksResponse doParseInstance(XContentParser parser) { return ListTasksResponse.fromXContent(parser); } @Override protected boolean supportsUnknownFields() { - return false; + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + //status and headers hold arbitrary content, we can't inject random fields in them + return field -> field.endsWith("status") || field.endsWith("headers"); } @Override protected void assertEqualInstances(ListTasksResponse expectedInstance, ListTasksResponse newInstance) { assertNotSame(expectedInstance, newInstance); assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); - assertThat(newInstance.getNodeFailures().size(), equalTo(1)); - for (ElasticsearchException failure : newInstance.getNodeFailures()) { - assertThat(failure, notNullValue()); - assertThat(failure.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=message]")); + assertThat(newInstance.getNodeFailures().size(), equalTo(expectedInstance.getNodeFailures().size())); + for (int i = 0; i < newInstance.getNodeFailures().size(); i++) { + ElasticsearchException newException = newInstance.getNodeFailures().get(i); + ElasticsearchException expectedException = expectedInstance.getNodeFailures().get(i); + assertThat(newException.getMetadata("es.node_id").get(0), equalTo(((FailedNodeException)expectedException).nodeId())); + assertThat(newException.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=error message]")); + assertThat(newException.getCause(), instanceOf(ElasticsearchException.class)); + ElasticsearchException cause = (ElasticsearchException) newException.getCause(); + assertThat(cause.getMessage(), equalTo("Elasticsearch exception [type=connect_exception, reason=null]")); + } + assertThat(newInstance.getTaskFailures().size(), equalTo(expectedInstance.getTaskFailures().size())); + for (int i = 0; i < newInstance.getTaskFailures().size(); i++) { + TaskOperationFailure newFailure = newInstance.getTaskFailures().get(i); + TaskOperationFailure expectedFailure = expectedInstance.getTaskFailures().get(i); + assertThat(newFailure.getNodeId(), equalTo(expectedFailure.getNodeId())); + assertThat(newFailure.getTaskId(), equalTo(expectedFailure.getTaskId())); + assertThat(newFailure.getStatus(), equalTo(expectedFailure.getStatus())); + 
assertThat(newFailure.getCause(), instanceOf(ElasticsearchException.class)); + ElasticsearchException cause = (ElasticsearchException) newFailure.getCause(); + assertThat(cause.getMessage(), equalTo("Elasticsearch exception [type=illegal_state_exception, reason=null]")); } } - @Override - protected boolean assertToXContentEquivalence() { - return false; + /** + * Test parsing {@link ListTasksResponse} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = ListTasksResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. + boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence); + } + + private static ListTasksResponse createTestInstanceWithFailures() { + int numNodeFailures = randomIntBetween(0, 3); + List nodeFailures = new ArrayList<>(numNodeFailures); + for (int i = 0; i < numNodeFailures; i++) { + nodeFailures.add(new FailedNodeException(randomAlphaOfLength(5), "error message", new ConnectException())); + } + int numTaskFailures = randomIntBetween(0, 3); + List taskFailures = new ArrayList<>(numTaskFailures); + for (int i = 0; i < numTaskFailures; i++) { + taskFailures.add(new TaskOperationFailure(randomAlphaOfLength(5), randomLong(), new IllegalStateException())); + } + return new ListTasksResponse(randomTasks(), taskFailures, nodeFailures); } } diff --git a/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java b/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java index 616ac1053871e..bc57109802afb 100644 --- a/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java @@ -63,11 +63,12 @@ protected boolean supportsUnknownFields() { @Override protected Predicate getRandomFieldsExcludeFilter() { + //status and headers hold arbitrary content, we can't inject random fields in them return field -> "status".equals(field) || "headers".equals(field); } @Override - protected TaskInfo mutateInstance(TaskInfo info) throws IOException { + protected TaskInfo mutateInstance(TaskInfo info) { switch (between(0, 9)) { case 0: TaskId taskId = new TaskId(info.getTaskId().getNodeId() + randomAlphaOfLength(5), info.getTaskId().getId()); From f76b225975684264f62a578a088ba042be1c3fc9 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Fri, 1 Jun 2018 08:53:53 +0200 Subject: [PATCH 10/18] Cross Cluster Search: preserve remote status code (#30976) In case an error is returned when calling search_shards on a remote cluster, which will lead to throwing an exception in the coordinating node, we should make sure that the status code returned by the coordinating node is the same as the one returned by the remote 
cluster. Up until now a 500 - Internal Server Error was always returned. This commit changes this behaviour so that,
for instance, if an index is not found, which causes a 404, a 404 is also returned by the coordinating node to the
client.

Closes #27461
---
 .../test/multi_cluster/50_missing.yml         |  4 ++--
 .../transport/RemoteClusterService.java       |  8 +++----
 .../RemoteClusterConnectionTests.java         | 11 +++++++---
 .../transport/RemoteClusterServiceTests.java  | 22 +++++++++++++++----
 .../test/multi_cluster/50_missing.yml         |  4 ++--
 5 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml
index c40e7be1c643e..ac47fe000dd0d 100644
--- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml
+++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml
@@ -1,7 +1,7 @@
 ---
 "Search with missing remote index pattern":
   - do:
-      catch: "request"
+      catch: "missing"
       search:
         index: "my_remote_cluster:foo"

@@ -34,7 +34,7 @@
   - match: { aggregations.cluster.buckets.0.doc_count: 6 }

   - do:
-      catch: "request"
+      catch: "missing"
      search:
        index: "my_remote_cluster:test_index,my_remote_cluster:foo"
        body:
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
index 2109910d64f04..fb67faeb66a97 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
@@ -216,7 +216,7 @@ public void collectSearchShards(IndicesOptions indicesOptions, String preference
                                     ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
         final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
         final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>();
-        final AtomicReference<TransportException> transportException = new AtomicReference<>();
+        final AtomicReference<RemoteTransportException> transportException = new AtomicReference<>();
         for (Map.Entry<String, OriginalIndices> entry : remoteIndicesByCluster.entrySet()) {
             final String clusterName = entry.getKey();
             RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName);
@@ -233,7 +233,7 @@
                 public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
                     searchShardsResponses.put(clusterName, clusterSearchShardsResponse);
                     if (responsesCountDown.countDown()) {
-                        TransportException exception = transportException.get();
+                        RemoteTransportException exception = transportException.get();
                         if (exception == null) {
                             listener.onResponse(searchShardsResponses);
                         } else {
@@ -244,8 +244,8 @@

                 @Override
                 public void onFailure(Exception e) {
-                    TransportException exception = new TransportException("unable to communicate with remote cluster [" +
-                            clusterName + "]", e);
+                    RemoteTransportException exception = new RemoteTransportException("error while communicating with remote cluster [" +
+                            clusterName + "]", e);
                     if (transportException.compareAndSet(null, exception) == false) {
                         exception = transportException.accumulateAndGet(exception, (previous, current) -> {
                             current.addSuppressed(previous);
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index ed17af26b6a49..b792da9ae7a87 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -51,7 +50,9 @@ import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -120,8 +121,12 @@ public static MockTransportService startTransport( try { newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ClusterSearchShardsRequest::new, ThreadPool.Names.SAME, (request, channel) -> { - channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], - knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); + if ("index_not_found".equals(request.preference())) { + channel.sendResponse(new IndexNotFoundException("index")); + } else { + channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], + knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); + } }); newService.registerRequestHandler(ClusterStateAction.NAME, ClusterStateRequest::new, ThreadPool.Names.SAME, (request, channel) -> { diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 5529f98af3342..03d76b5a953c6 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -469,7 +470,6 @@ public void onFailure(Exception e) { assertEquals("no such remote cluster: [no such cluster]", ex.get().getMessage()); } { - logger.info("closing all source nodes"); // close all targets and check for the transport level failure path IOUtils.close(c1N1, c1N2, c2N1, c2N2); @@ -559,7 +559,20 @@ public void testCollectSearchShards() throws Exception { assertEquals(1, shardsResponse.getNodes().length); } } - + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicReference> response = new AtomicReference<>(); + AtomicReference failure = new AtomicReference<>(); + remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", + null, remoteIndicesByCluster, + new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); + assertTrue(latch.await(1, TimeUnit.SECONDS)); + assertNull(response.get()); + assertNotNull(failure.get()); + 
assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } int numDisconnectedClusters = randomIntBetween(1, numClusters); Set disconnectedNodes = new HashSet<>(numDisconnectedClusters); Set disconnectedNodesIndices = new HashSet<>(numDisconnectedClusters); @@ -593,8 +606,9 @@ public void onNodeDisconnected(DiscoveryNode node) { assertTrue(latch.await(1, TimeUnit.SECONDS)); assertNull(response.get()); assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(TransportException.class)); - assertThat(failure.get().getMessage(), containsString("unable to communicate with remote cluster")); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); + assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); } //setting skip_unavailable to true for all the disconnected clusters will make the request succeed again diff --git a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml index 9c445f418daf2..0b224518782c3 100644 --- a/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml +++ b/x-pack/qa/multi-cluster-search-security/src/test/resources/rest-api-spec/test/multi_cluster/50_missing.yml @@ -56,13 +56,13 @@ teardown: - match: { hits.total: 0 } - do: - catch: "request" + catch: "forbidden" headers: { Authorization: "Basic am9lOnMza3JpdA==" } search: index: "*:foo-bar" - do: - catch: "request" + catch: "forbidden" headers: { Authorization: "Basic am9lOnMza3JpdA==" } search: index: "my_remote_cluster:foo-bar" From 7d955f8163f2cbb9b5fe76f4d9063ead21c303e7 Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Fri, 1 Jun 2018 08:55:43 +0200 Subject: [PATCH 11/18] REST high-level client: add get ingest pipeline API (#30847) Relates to #27205 --- .../elasticsearch/client/ClusterClient.java | 24 ++++ .../client/RequestConverters.java | 13 ++ .../elasticsearch/client/ClusterClientIT.java | 53 +++---- .../client/ESRestHighLevelClientTestCase.java | 43 ++++++ .../client/RequestConvertersTests.java | 15 ++ .../ClusterClientDocumentationIT.java | 74 ++++++++++ .../high-level/cluster/get_pipeline.asciidoc | 75 ++++++++++ .../high-level/supported-apis.asciidoc | 2 + .../action/ingest/GetPipelineResponse.java | 77 +++++++++- .../ingest/PipelineConfiguration.java | 5 +- .../ingest/GetPipelineResponseTests.java | 131 ++++++++++++++++++ 11 files changed, 483 insertions(+), 29 deletions(-) create mode 100644 docs/java-rest/high-level/cluster/get_pipeline.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 846f29bfb6ef9..34441d4160f4c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineResponse; @@ -87,4 +89,26 @@ public void putPipelineAsync(PutPipelineRequest request, ActionListener + * See + * Get Pipeline API on elastic.co + */ + public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, + GetPipelineResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously get an existing pipeline + *

+ * See + * Get Pipeline API on elastic.co + */ + public void getPipelineAsync(GetPipelineRequest request, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, + GetPipelineResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 2e71cad20d0f2..0e7e757f9aded 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -61,6 +61,7 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.SearchRequest; @@ -631,6 +632,18 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett return request; } + static Request getPipeline(GetPipelineRequest getPipelineRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addCommaSeparatedPathParts(getPipelineRequest.getIds()) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout()); + return request; + } + static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ingest/pipeline") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 44332b058bc15..caab4c282f4d4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -22,6 +22,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -32,7 +34,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -113,31 +115,7 @@ public void testClusterUpdateSettingNonExistent() { public void testPutPipeline() throws IOException { String id = "some_pipeline_id"; - XContentType xContentType = randomFrom(XContentType.values()); - XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); - pipelineBuilder.startObject(); - { - 
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors"); - pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY); - { - pipelineBuilder.startObject().startObject("set"); - { - pipelineBuilder - .field("field", "foo") - .field("value", "bar"); - } - pipelineBuilder.endObject().endObject(); - pipelineBuilder.startObject().startObject("convert"); - { - pipelineBuilder - .field("field", "rank") - .field("type", "integer"); - } - pipelineBuilder.endObject().endObject(); - } - pipelineBuilder.endArray(); - } - pipelineBuilder.endObject(); + XContentBuilder pipelineBuilder = buildRandomXContentPipeline(); PutPipelineRequest request = new PutPipelineRequest( id, BytesReference.bytes(pipelineBuilder), @@ -147,4 +125,27 @@ public void testPutPipeline() throws IOException { execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync); assertTrue(putPipelineResponse.isAcknowledged()); } + + public void testGetPipeline() throws IOException { + String id = "some_pipeline_id"; + XContentBuilder pipelineBuilder = buildRandomXContentPipeline(); + { + PutPipelineRequest request = new PutPipelineRequest( + id, + BytesReference.bytes(pipelineBuilder), + pipelineBuilder.contentType() + ); + createPipeline(request); + } + + GetPipelineRequest request = new GetPipelineRequest(id); + + GetPipelineResponse response = + execute(request, highLevelClient().cluster()::getPipeline, highLevelClient().cluster()::getPipelineAsync); + assertTrue(response.isFound()); + assertEquals(response.pipelines().get(0).getId(), id); + PipelineConfiguration expectedConfig = + new PipelineConfiguration(id, BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType()); + assertEquals(expectedConfig.getConfigAsMap(), response.pipelines().get(0).getConfigAsMap()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index aabe2c4d1e270..f7a934405c2ae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -21,7 +21,12 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; @@ -80,4 +85,42 @@ private HighLevelClient(RestClient restClient) { super(restClient, (client) -> {}, Collections.emptyList()); } } + + protected static XContentBuilder buildRandomXContentPipeline() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent()); + pipelineBuilder.startObject(); + { + pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors"); + pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY); + { + pipelineBuilder.startObject().startObject("set"); + { + pipelineBuilder + .field("field", "foo") + .field("value", "bar"); + } + pipelineBuilder.endObject().endObject(); + 
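
Serialized to JSON, the pipeline this helper assembles is the usual two-processor
demo, and a caller could create the same pipeline directly from a string. A sketch,
assuming a `RestHighLevelClient` named `client` (`putPipeline` returns
`PutPipelineResponse` until a later patch in this series consolidates it into
`WritePipelineResponse`):

["source","java"]
--------------------------------------------------
String source = "{\"description\":\"some random set of processors\",\"processors\":["
    + "{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}},"
    + "{\"convert\":{\"field\":\"rank\",\"type\":\"integer\"}}]}";
PutPipelineRequest request =
    new PutPipelineRequest("some_pipeline_id", new BytesArray(source), XContentType.JSON);
PutPipelineResponse response = client.cluster().putPipeline(request);
assertTrue(response.isAcknowledged());
--------------------------------------------------
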
pipelineBuilder.startObject().startObject("convert"); + { + pipelineBuilder + .field("field", "rank") + .field("type", "integer"); + } + pipelineBuilder.endObject().endObject(); + } + pipelineBuilder.endArray(); + } + pipelineBuilder.endObject(); + return pipelineBuilder; + } + + protected static void createPipeline(String pipelineId) throws IOException { + XContentBuilder builder = buildRandomXContentPipeline(); + createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType())); + } + + protected static void createPipeline(PutPipelineRequest putPipelineRequest) throws IOException { + assertOK(client().performRequest(RequestConverters.putPipeline(putPipelineRequest))); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index dd54d17e7c865..acd217c5ecdcb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.MultiSearchRequest; @@ -1482,6 +1483,20 @@ public void testPutPipeline() throws IOException { assertEquals(expectedParams, expectedRequest.getParameters()); } + public void testGetPipeline() { + String pipelineId = "some_pipeline_id"; + Map expectedParams = new HashMap<>(); + GetPipelineRequest request = new GetPipelineRequest("some_pipeline_id"); + setRandomMasterTimeout(request, expectedParams); + Request expectedRequest = RequestConverters.getPipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + assertEquals(HttpGet.METHOD_NAME, expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + } + public void testRollover() throws IOException { RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), randomBoolean() ? 
null : randomAlphaOfLengthBetween(3, 10)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 29bb2d05afcc7..07785ecc03dc4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.GetPipelineRequest; +import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -34,11 +36,13 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.ingest.PipelineConfiguration; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -257,4 +261,74 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testGetPipeline() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + createPipeline("my-pipeline-id"); + } + + { + // tag::get-pipeline-request + GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); // <1> + // end::get-pipeline-request + + // tag::get-pipeline-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::get-pipeline-request-masterTimeout + + // tag::get-pipeline-execute + GetPipelineResponse response = client.cluster().getPipeline(request); // <1> + // end::get-pipeline-execute + + // tag::get-pipeline-response + boolean successful = response.isFound(); // <1> + List pipelines = response.pipelines(); // <2> + for(PipelineConfiguration pipeline: pipelines) { + Map config = pipeline.getConfigAsMap(); // <3> + } + // end::get-pipeline-response + + assertTrue(successful); + } + } + + public void testGetPipelineAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createPipeline("my-pipeline-id"); + } + + { + GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); + + // tag::get-pipeline-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetPipelineResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-pipeline-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-pipeline-execute-async + client.cluster().getPipelineAsync(request, listener); // <1> + // end::get-pipeline-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git 
a/docs/java-rest/high-level/cluster/get_pipeline.asciidoc b/docs/java-rest/high-level/cluster/get_pipeline.asciidoc new file mode 100644 index 0000000000000..d6a9472a715e1 --- /dev/null +++ b/docs/java-rest/high-level/cluster/get_pipeline.asciidoc @@ -0,0 +1,75 @@ +[[java-rest-high-cluster-get-pipeline]] +=== Get Pipeline API + +[[java-rest-high-cluster-get-pipeline-request]] +==== Get Pipeline Request + +A `GetPipelineRequest` requires one or more `pipelineIds` to fetch. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request] +-------------------------------------------------- +<1> The pipeline id to fetch + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-cluster-get-pipeline-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a GetPipelineResponse object. + +[[java-rest-high-cluster-get-pipeline-async]] +==== Asynchronous Execution + +The asynchronous execution of a get pipeline request requires both the `GetPipelineRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-async] +-------------------------------------------------- +<1> The `GetPipelineRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `GetPipelineResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-cluster-get-pipeline-response]] +==== Get Pipeline Response + +The returned `GetPipelineResponse` allows to retrieve information about the executed + operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-response] +-------------------------------------------------- +<1> Check if a matching pipeline id was found or not. +<2> Get the list of pipelines found as a list of `PipelineConfig` objects. 
+<3> Get the individual configuration of each pipeline as a `Map`. diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 841eba67f64ab..a196a44762f7f 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -107,9 +107,11 @@ The Java High Level REST Client supports the following Cluster APIs: * <> * <> +* <> include::cluster/put_settings.asciidoc[] include::cluster/put_pipeline.asciidoc[] +include::cluster/get_pipeline.asciidoc[] == Snapshot APIs diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java index 30843bdff9b28..297a7f0efc1d2 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java @@ -20,16 +20,24 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.HashMap; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject { @@ -42,8 +50,13 @@ public GetPipelineResponse(List pipelines) { this.pipelines = pipelines; } + /** + * Get the list of pipelines that were a part of this response. + * The pipeline id can be obtained using getId on the PipelineConfiguration object. + * @return A list of {@link PipelineConfiguration} objects. + */ public List pipelines() { - return pipelines; + return Collections.unmodifiableList(pipelines); } @Override @@ -83,4 +96,66 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + + /** + * + * @param parser the parser for the XContent that contains the serialized GetPipelineResponse. 
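
`fromXContent` walks a response body keyed by pipeline id, copying each value into a
`PipelineConfiguration`. A round-trip sketch of the shape it consumes, assuming a JSON
body and the default parsing infrastructure:

["source","java"]
--------------------------------------------------
String body = "{\"my-pipeline-id\":{\"description\":\"demo\",\"processors\":[]}}";
XContentParser parser = XContentType.JSON.xContent().createParser(
    NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, body);
GetPipelineResponse response = GetPipelineResponse.fromXContent(parser);
assertTrue(response.isFound());
assertEquals("my-pipeline-id", response.pipelines().get(0).getId());
--------------------------------------------------
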
+ * @return an instance of GetPipelineResponse read from the parser + * @throws IOException If the parsing fails + */ + public static GetPipelineResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + List pipelines = new ArrayList<>(); + while(parser.nextToken().equals(Token.FIELD_NAME)) { + String pipelineId = parser.currentName(); + parser.nextToken(); + XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent()); + contentBuilder.generator().copyCurrentStructure(parser); + PipelineConfiguration pipeline = + new PipelineConfiguration( + pipelineId, BytesReference.bytes(contentBuilder), contentBuilder.contentType() + ); + pipelines.add(pipeline); + } + ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser::getTokenLocation); + return new GetPipelineResponse(pipelines); + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } else if (other instanceof GetPipelineResponse){ + GetPipelineResponse otherResponse = (GetPipelineResponse)other; + if (pipelines == null) { + return otherResponse.pipelines == null; + } else { + // We need a map here because order does not matter for equality + Map otherPipelineMap = new HashMap<>(); + for (PipelineConfiguration pipeline: otherResponse.pipelines) { + otherPipelineMap.put(pipeline.getId(), pipeline); + } + for (PipelineConfiguration pipeline: pipelines) { + PipelineConfiguration otherPipeline = otherPipelineMap.get(pipeline.getId()); + if (!pipeline.equals(otherPipeline)) { + return false; + } + } + return true; + } + } else { + return false; + } + } + + @Override + public int hashCode() { + int result = 1; + for (PipelineConfiguration pipeline: pipelines) { + // We only take the sum here to ensure that the order does not matter. + result += (pipeline == null ? 0 : pipeline.hashCode()); + } + return result; + } + } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index 737bad8ee5b0c..3b296bf52d306 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -148,14 +149,14 @@ public boolean equals(Object o) { PipelineConfiguration that = (PipelineConfiguration) o; if (!id.equals(that.id)) return false; - return config.equals(that.config); + return getConfigAsMap().equals(that.getConfigAsMap()); } @Override public int hashCode() { int result = id.hashCode(); - result = 31 * result + config.hashCode(); + result = 31 * result + getConfigAsMap().hashCode(); return result; } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java new file mode 100644 index 0000000000000..95424acce77d4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
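
The `PipelineConfiguration` change above switches `equals` and `hashCode` from the raw
`BytesReference` to the parsed `getConfigAsMap()`, so the same pipeline serialized with
different `XContentType`s now compares equal. A sketch with hypothetical content:

["source","java"]
--------------------------------------------------
PipelineConfiguration json = new PipelineConfiguration("id",
    new BytesArray("{\"description\":\"x\",\"processors\":[]}"), XContentType.JSON);
PipelineConfiguration yaml = new PipelineConfiguration("id",
    new BytesArray("description: \"x\"\nprocessors: []\n"), XContentType.YAML);
// both parse to the same map, so they now compare equal
assertEquals(json, yaml);
assertEquals(json.hashCode(), yaml.hashCode());
--------------------------------------------------
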
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.ingest; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class GetPipelineResponseTests extends AbstractStreamableXContentTestCase { + + private XContentBuilder getRandomXContentBuilder() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + return XContentBuilder.builder(xContentType.xContent()); + } + + private PipelineConfiguration createRandomPipeline(String pipelineId) throws IOException { + String field = "field_" + randomInt(); + String value = "value_" + randomInt(); + XContentBuilder builder = getRandomXContentBuilder(); + builder.startObject(); + // We only use a single SetProcessor here in each pipeline to test. 
+ // Since the contents are returned as a configMap anyway this does not matter for fromXContent + builder.startObject("set"); + builder.field("field", field); + builder.field("value", value); + builder.endObject(); + builder.endObject(); + return + new PipelineConfiguration( + pipelineId, BytesReference.bytes(builder), builder.contentType() + ); + } + + private Map createPipelineConfigMap() throws IOException { + int numPipelines = randomInt(5); + Map pipelinesMap = new HashMap<>(); + for (int i=0; i pipelinesMap = createPipelineConfigMap(); + GetPipelineResponse response = new GetPipelineResponse(new ArrayList<>(pipelinesMap.values())); + XContentBuilder builder = response.toXContent(getRandomXContentBuilder(), ToXContent.EMPTY_PARAMS); + XContentParser parser = + builder + .generator() + .contentType() + .xContent() + .createParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + BytesReference.bytes(builder).streamInput() + ); + GetPipelineResponse parsedResponse = GetPipelineResponse.fromXContent(parser); + List actualPipelines = response.pipelines(); + List parsedPipelines = parsedResponse.pipelines(); + assertEquals(actualPipelines.size(), parsedPipelines.size()); + for (PipelineConfiguration pipeline: parsedPipelines) { + assertTrue(pipelinesMap.containsKey(pipeline.getId())); + assertEquals(pipelinesMap.get(pipeline.getId()).getConfigAsMap(), pipeline.getConfigAsMap()); + } + } + + @Override + protected GetPipelineResponse doParseInstance(XContentParser parser) throws IOException { + return GetPipelineResponse.fromXContent(parser); + } + + @Override + protected GetPipelineResponse createBlankInstance() { + return new GetPipelineResponse(); + } + + @Override + protected GetPipelineResponse createTestInstance() { + try { + return new GetPipelineResponse(new ArrayList<>(createPipelineConfigMap().values())); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected GetPipelineResponse mutateInstance(GetPipelineResponse response) { + try { + List clonePipelines = new ArrayList<>(response.pipelines()); + clonePipelines.add(createRandomPipeline("pipeline_" + clonePipelines.size() + 1)); + return new GetPipelineResponse(clonePipelines); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} From 7d7d2f40da80643d90e18b88cb86e5e2f6b7dec1 Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Fri, 1 Jun 2018 15:17:35 +0100 Subject: [PATCH 12/18] Ensure that index_prefixes settings cannot be changed (#30967) --- .../index/mapper/TextFieldMapper.java | 34 ++++++++----------- .../index/mapper/TextFieldTypeTests.java | 15 ++++---- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 0e5d0fb131e15..7d0aa155a3f43 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -289,22 +289,6 @@ public String toString() { return super.toString() + ",prefixChars=" + minChars + ":" + maxChars; } - @Override - public void checkCompatibility(MappedFieldType other, List conflicts, boolean strict) { - super.checkCompatibility(other, conflicts, strict); - if (strict) { - PrefixFieldType otherFieldType = (PrefixFieldType) other; - if (otherFieldType.minChars != this.minChars) { - 
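
With the conflict check moved into `TextFieldType.checkCompatibility` below, changing
`index_prefixes` on an existing field is rejected at merge time regardless of
`update_all_types`. A sketch in the style of the mapper unit tests, assuming a
`MapperService` named `mapperService` and illustrative type and field names:

["source","java"]
--------------------------------------------------
String before = "{\"type\":{\"properties\":{\"field\":{\"type\":\"text\","
    + "\"index_prefixes\":{\"min_chars\":2,\"max_chars\":5}}}}}";
String after = "{\"type\":{\"properties\":{\"field\":{\"type\":\"text\","
    + "\"index_prefixes\":{\"min_chars\":3,\"max_chars\":5}}}}}";
mapperService.merge("type", new CompressedXContent(before),
    MapperService.MergeReason.MAPPING_UPDATE, false);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
    () -> mapperService.merge("type", new CompressedXContent(after),
        MapperService.MergeReason.MAPPING_UPDATE, false));
assertThat(e.getMessage(), containsString("has different [index_prefixes] settings"));
--------------------------------------------------
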
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[index_prefixes.min_chars] across all types."); - } - if (otherFieldType.maxChars != this.maxChars) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " - + "[index_prefixes.max_chars] across all types."); - } - } - } - @Override public Query existsQuery(QueryShardContext context) { throw new UnsupportedOperationException(); @@ -408,6 +392,19 @@ public void checkCompatibility(MappedFieldType other, List conflicts, boolean strict) { super.checkCompatibility(other, conflicts, strict); TextFieldType otherType = (TextFieldType) other; + if (Objects.equals(this.prefixFieldType, otherType.prefixFieldType) == false) { + if (this.prefixFieldType == null) { + conflicts.add("mapper [" + name() + + "] has different [index_prefixes] settings, cannot change from disabled to enabled"); + } + else if (otherType.prefixFieldType == null) { + conflicts.add("mapper [" + name() + + "] has different [index_prefixes] settings, cannot change from enabled to disabled"); + } + else { + conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings"); + } + } if (strict) { if (fielddata() != otherType.fielddata()) { conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " @@ -425,10 +422,6 @@ public void checkCompatibility(MappedFieldType other, conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update " + "[fielddata_frequency_filter.min_segment_size] across all types."); } - if (Objects.equals(this.prefixFieldType, ((TextFieldType) other).prefixFieldType) == false) { - conflicts.add("mapper [" + name() + "] is used by multiple types. 
Set update_all_types to true to update " - + "[index_prefixes] across all types."); - } } } @@ -521,6 +514,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { } return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); } + } private Boolean includeInAll; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java index 815e946e023d8..d0eacfad44056 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java @@ -18,23 +18,20 @@ */ package org.elasticsearch.index.mapper; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.TextFieldMapper; import org.junit.Before; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + public class TextFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -71,7 +68,7 @@ public void modify(MappedFieldType ft) { tft.setFielddataMinSegmentSize(1000); } }); - addModifier(new Modifier("index_prefixes", true) { + addModifier(new Modifier("index_prefixes", false) { @Override public void modify(MappedFieldType ft) { TextFieldMapper.TextFieldType tft = (TextFieldMapper.TextFieldType)ft; From 1f4e775fe7d0e128b939904475192c7205bfb27c Mon Sep 17 00:00:00 2001 From: Sohaib Iftikhar Date: Fri, 1 Jun 2018 14:13:41 +0200 Subject: [PATCH 13/18] REST high-level client: add delete ingest pipeline API (#30865) Relates to #27205 --- .../elasticsearch/client/ClusterClient.java | 37 +++++++-- .../client/RequestConverters.java | 15 ++++ .../elasticsearch/client/ClusterClientIT.java | 18 ++++- .../client/RequestConvertersTests.java | 16 ++++ .../ClusterClientDocumentationIT.java | 81 +++++++++++++++++-- .../cluster/delete_pipeline.asciidoc | 80 ++++++++++++++++++ .../high-level/cluster/put_pipeline.asciidoc | 10 +-- .../high-level/supported-apis.asciidoc | 2 + .../action/ingest/PutPipelineResponse.java | 62 -------------- .../action/ingest/WritePipelineResponse.java | 16 +++- .../ingest/PutPipelineResponseTests.java | 53 ------------ .../ingest/WritePipelineResponseTests.java | 32 +++++++- 12 files changed, 286 insertions(+), 136 deletions(-) create mode 100644 docs/java-rest/high-level/cluster/delete_pipeline.asciidoc delete mode 100644 server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java delete mode 100644 server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 34441d4160f4c..4254b132b5776 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -23,10 +23,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineResponse; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.WritePipelineResponse; import java.io.IOException; @@ -74,9 +75,9 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR * See * Put Pipeline API on elastic.co */ - public PutPipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException { + public WritePipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, - PutPipelineResponse::fromXContent, emptySet(), headers); + WritePipelineResponse::fromXContent, emptySet(), headers); } /** @@ -85,9 +86,9 @@ public PutPipelineResponse putPipeline(PutPipelineRequest request, Header... hea * See * Put Pipeline API on elastic.co */ - public void putPipelineAsync(PutPipelineRequest request, ActionListener listener, Header... headers) { + public void putPipelineAsync(PutPipelineRequest request, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, - PutPipelineResponse::fromXContent, listener, emptySet(), headers); + WritePipelineResponse::fromXContent, listener, emptySet(), headers); } /** @@ -111,4 +112,28 @@ public void getPipelineAsync(GetPipelineRequest request, ActionListener + * See + * + * Delete Pipeline API on elastic.co + */ + public WritePipelineResponse deletePipeline(DeletePipelineRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, + WritePipelineResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously delete an existing pipeline + *

+ * See + * + * Delete Pipeline API on elastic.co + */ + public void deletePipelineAsync(DeletePipelineRequest request, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, + WritePipelineResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 0e7e757f9aded..0a8323a29f5c2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -60,6 +60,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; @@ -659,6 +660,20 @@ static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOExcep return request; } + static Request deletePipeline(DeletePipelineRequest deletePipelineRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_ingest/pipeline") + .addPathPart(deletePipelineRequest.getId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withTimeout(deletePipelineRequest.timeout()); + parameters.withMasterTimeout(deletePipelineRequest.masterNodeTimeout()); + + return request; + } + static Request listTasks(ListTasksRequest listTaskRequest) { if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index caab4c282f4d4..42db51e81b74d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -25,7 +25,8 @@ import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineResponse; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -121,7 +122,7 @@ public void testPutPipeline() throws IOException { BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType()); - PutPipelineResponse putPipelineResponse = + WritePipelineResponse putPipelineResponse = execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync); assertTrue(putPipelineResponse.isAcknowledged()); } @@ -148,4 +149,17 @@ public void testGetPipeline() throws IOException { new PipelineConfiguration(id, BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType()); 
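
Taken together with the put and get APIs added earlier in this series, the cluster
client now covers the full pipeline lifecycle. A sketch, assuming a
`RestHighLevelClient` named `client`:

["source","java"]
--------------------------------------------------
String source = "{\"description\":\"demo\",\"processors\":["
    + "{\"set\":{\"field\":\"f\",\"value\":\"v\"}}]}";
PutPipelineRequest put =
    new PutPipelineRequest("my-pipeline-id", new BytesArray(source), XContentType.JSON);
assertTrue(client.cluster().putPipeline(put).isAcknowledged());

GetPipelineResponse fetched =
    client.cluster().getPipeline(new GetPipelineRequest("my-pipeline-id"));
assertTrue(fetched.isFound());

WritePipelineResponse deleted =
    client.cluster().deletePipeline(new DeletePipelineRequest("my-pipeline-id"));
assertTrue(deleted.isAcknowledged());
--------------------------------------------------
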
assertEquals(expectedConfig.getConfigAsMap(), response.pipelines().get(0).getConfigAsMap()); } + + public void testDeletePipeline() throws IOException { + String id = "some_pipeline_id"; + { + createPipeline(id); + } + + DeletePipelineRequest request = new DeletePipelineRequest(id); + + WritePipelineResponse response = + execute(request, highLevelClient().cluster()::deletePipeline, highLevelClient().cluster()::deletePipelineAsync); + assertTrue(response.isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index acd217c5ecdcb..6a7da03c3b654 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.search.ClearScrollRequest; @@ -1497,6 +1498,21 @@ public void testGetPipeline() { assertEquals(expectedParams, expectedRequest.getParameters()); } + public void testDeletePipeline() { + String pipelineId = "some_pipeline_id"; + Map expectedParams = new HashMap<>(); + DeletePipelineRequest request = new DeletePipelineRequest(pipelineId); + setRandomMasterTimeout(request, expectedParams); + setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + Request expectedRequest = RequestConverters.deletePipeline(request); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + endpoint.add("_ingest/pipeline"); + endpoint.add(pipelineId); + assertEquals(endpoint.toString(), expectedRequest.getEndpoint()); + assertEquals(HttpDelete.METHOD_NAME, expectedRequest.getMethod()); + assertEquals(expectedParams, expectedRequest.getParameters()); + } + public void testRollover() throws IOException { RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10), randomBoolean() ? 
null : randomAlphaOfLengthBetween(3, 10)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 07785ecc03dc4..0da577f17e873 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -26,7 +26,8 @@ import org.elasticsearch.action.ingest.GetPipelineRequest; import org.elasticsearch.action.ingest.GetPipelineResponse; import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.PutPipelineResponse; +import org.elasticsearch.action.ingest.DeletePipelineRequest; +import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -212,7 +213,7 @@ public void testPutPipeline() throws IOException { // end::put-pipeline-request-masterTimeout // tag::put-pipeline-execute - PutPipelineResponse response = client.cluster().putPipeline(request); // <1> + WritePipelineResponse response = client.cluster().putPipeline(request); // <1> // end::put-pipeline-execute // tag::put-pipeline-response @@ -236,10 +237,10 @@ public void testPutPipelineAsync() throws Exception { ); // tag::put-pipeline-execute-listener - ActionListener listener = - new ActionListener() { + ActionListener listener = + new ActionListener() { @Override - public void onResponse(PutPipelineResponse response) { + public void onResponse(WritePipelineResponse response) { // <1> } @@ -331,4 +332,74 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testDeletePipeline() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + createPipeline("my-pipeline-id"); + } + + { + // tag::delete-pipeline-request + DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id"); // <1> + // end::delete-pipeline-request + + // tag::delete-pipeline-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::delete-pipeline-request-timeout + + // tag::delete-pipeline-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::delete-pipeline-request-masterTimeout + + // tag::delete-pipeline-execute + WritePipelineResponse response = client.cluster().deletePipeline(request); // <1> + // end::delete-pipeline-execute + + // tag::delete-pipeline-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::delete-pipeline-response + assertTrue(acknowledged); + } + } + + public void testDeletePipelineAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createPipeline("my-pipeline-id"); + } + + { + DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id"); + + // tag::delete-pipeline-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(WritePipelineResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-pipeline-execute-listener + + // Replace the empty listener 
by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-pipeline-execute-async + client.cluster().deletePipelineAsync(request, listener); // <1> + // end::delete-pipeline-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/docs/java-rest/high-level/cluster/delete_pipeline.asciidoc b/docs/java-rest/high-level/cluster/delete_pipeline.asciidoc new file mode 100644 index 0000000000000..f809f831f7814 --- /dev/null +++ b/docs/java-rest/high-level/cluster/delete_pipeline.asciidoc @@ -0,0 +1,80 @@ +[[java-rest-high-cluster-delete-pipeline]] +=== Delete Pipeline API + +[[java-rest-high-cluster-delete-pipeline-request]] +==== Delete Pipeline Request + +A `DeletePipelineRequest` requires a pipeline `id` to delete. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request] +-------------------------------------------------- +<1> The pipeline id to delete + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the pipeline deletion as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the pipeline deletion as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +[[java-rest-high-cluster-delete-pipeline-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute] +-------------------------------------------------- +<1> Execute the request and get back the response in a `WritePipelineResponse` object. + +[[java-rest-high-cluster-delete-pipeline-async]] +==== Asynchronous Execution + +The asynchronous execution of a delete pipeline request requires both the `DeletePipelineRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute-async] +-------------------------------------------------- +<1> The `DeletePipelineRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. 
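
End to end, the snippets this page includes boil down to a few lines. A condensed
sketch, assuming a `RestHighLevelClient` named `client` and an existing pipeline
`my-pipeline-id`:

["source","java"]
--------------------------------------------------
DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id");
request.timeout(TimeValue.timeValueMinutes(2));           // optional
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // optional
WritePipelineResponse response = client.cluster().deletePipeline(request);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------
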
+ +A typical listener for `WritePipelineResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-cluster-delete-pipeline-response]] +==== Delete Pipeline Response + +The returned `WritePipelineResponse` allows to retrieve information about the executed + operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[delete-pipeline-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request diff --git a/docs/java-rest/high-level/cluster/put_pipeline.asciidoc b/docs/java-rest/high-level/cluster/put_pipeline.asciidoc index d50a6741cc0a9..942b75b74cd0b 100644 --- a/docs/java-rest/high-level/cluster/put_pipeline.asciidoc +++ b/docs/java-rest/high-level/cluster/put_pipeline.asciidoc @@ -22,8 +22,8 @@ The following arguments can optionally be provided: -------------------------------------------------- include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-timeout] -------------------------------------------------- -<1> Timeout to wait for the all the nodes to acknowledge the index creation as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the index creation as a `String` +<1> Timeout to wait for the all the nodes to acknowledge the pipeline creation as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the pipeline creation as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -39,7 +39,7 @@ include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-reque -------------------------------------------------- include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute] -------------------------------------------------- -<1> Execute the request and get back the response in a PutPipelineResponse object. +<1> Execute the request and get back the response in a WritePipelineResponse object. [[java-rest-high-cluster-put-pipeline-async]] ==== Asynchronous Execution @@ -60,7 +60,7 @@ completed the `ActionListener` is called back using the `onResponse` method if the execution successfully completed or using the `onFailure` method if it failed. 
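
Since put and delete now share `WritePipelineResponse`, callers can treat both
acknowledgements uniformly. A sketch, assuming previously built `putRequest` and
`deleteRequest` instances and a `RestHighLevelClient` named `client`:

["source","java"]
--------------------------------------------------
WritePipelineResponse putAck = client.cluster().putPipeline(putRequest);
WritePipelineResponse deleteAck = client.cluster().deletePipeline(deleteRequest);
// one response type for both write operations
assertTrue(putAck.isAcknowledged() && deleteAck.isAcknowledged());
--------------------------------------------------
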
-A typical listener for `PutPipelineResponse` looks like: +A typical listener for `WritePipelineResponse` looks like: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -73,7 +73,7 @@ provided as an argument [[java-rest-high-cluster-put-pipeline-response]] ==== Put Pipeline Response -The returned `PutPipelineResponse` allows to retrieve information about the executed +The returned `WritePipelineResponse` allows to retrieve information about the executed operation as follows: ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index a196a44762f7f..534d161abc3cf 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -108,10 +108,12 @@ The Java High Level REST Client supports the following Cluster APIs: * <> * <> * <> +* <> include::cluster/put_settings.asciidoc[] include::cluster/put_pipeline.asciidoc[] include::cluster/get_pipeline.asciidoc[] +include::cluster/delete_pipeline.asciidoc[] == Snapshot APIs diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java deleted file mode 100644 index 13960ca99ef7e..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineResponse.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -public class PutPipelineResponse extends AcknowledgedResponse implements ToXContentObject { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "cluster_put_pipeline", true, args -> new PutPipelineResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - - public PutPipelineResponse() { - } - - public PutPipelineResponse(boolean acknowledged) { - super(acknowledged); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - readAcknowledged(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - writeAcknowledged(out); - } - - public static PutPipelineResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java index ced6085e0acc8..36301d6735a02 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java @@ -22,10 +22,20 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; -public class WritePipelineResponse extends AcknowledgedResponse { +public class WritePipelineResponse extends AcknowledgedResponse implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "write_pipeline_response", true, args -> new WritePipelineResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } WritePipelineResponse() { @@ -46,4 +56,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + public static WritePipelineResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java deleted file mode 100644 index 438d3e550442c..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/ingest/PutPipelineResponseTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.ingest; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; - -public class PutPipelineResponseTests extends AbstractStreamableXContentTestCase { - - public void testToXContent() { - PutPipelineResponse response = new PutPipelineResponse(true); - String output = Strings.toString(response); - assertEquals("{\"acknowledged\":true}", output); - } - - @Override - protected PutPipelineResponse doParseInstance(XContentParser parser) { - return PutPipelineResponse.fromXContent(parser); - } - - @Override - protected PutPipelineResponse createTestInstance() { - return new PutPipelineResponse(randomBoolean()); - } - - @Override - protected PutPipelineResponse createBlankInstance() { - return new PutPipelineResponse(); - } - - @Override - protected PutPipelineResponse mutateInstance(PutPipelineResponse response) { - return new PutPipelineResponse(response.isAcknowledged() == false); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java index 00327603ba804..f68545c3f7921 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/WritePipelineResponseTests.java @@ -19,15 +19,17 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.io.IOException; import static org.hamcrest.CoreMatchers.equalTo; -public class WritePipelineResponseTests extends ESTestCase { +public class WritePipelineResponseTests extends AbstractStreamableXContentTestCase { public void testSerializationWithoutError() throws IOException { boolean isAcknowledged = randomBoolean(); @@ -52,4 +54,30 @@ public void testSerializationWithError() throws IOException { assertThat(otherResponse.isAcknowledged(), equalTo(response.isAcknowledged())); } + + public void testToXContent() { + WritePipelineResponse response = new WritePipelineResponse(true); + String output = Strings.toString(response); + assertEquals("{\"acknowledged\":true}", output); + } + + @Override + protected WritePipelineResponse doParseInstance(XContentParser parser) { + return WritePipelineResponse.fromXContent(parser); + } + + @Override + protected WritePipelineResponse createTestInstance() { + return new WritePipelineResponse(randomBoolean()); + } + + @Override + protected WritePipelineResponse createBlankInstance() { + return new WritePipelineResponse(); + } + + @Override + protected WritePipelineResponse mutateInstance(WritePipelineResponse response) { + return new WritePipelineResponse(response.isAcknowledged() == false); + } } From 
01c065455848158b47e5835b766189e1a6391c65 Mon Sep 17 00:00:00 2001
From: lipsill <39668292+lipsill@users.noreply.github.com>
Date: Fri, 1 Jun 2018 18:15:24 +0200
Subject: [PATCH 14/18] [Docs] Fix a typo in Create Index naming limitation (#30891)

---
 docs/reference/indices/create-index.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc
index 680eeb62151d0..c1b2829bdb9cf 100644
--- a/docs/reference/indices/create-index.asciidoc
+++ b/docs/reference/indices/create-index.asciidoc
@@ -23,7 +23,7 @@ There are several limitations to what you can name your index.  The complete lis
 - Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, `#`
 - Indices prior to 7.0 could contain a colon (`:`), but that's been deprecated and won't be supported in 7.0+
 - Cannot start with `-`, `_`, `+`
-- Cannot be `.` or ``..`
+- Cannot be `.` or `..`
 - Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster)
======================================================

From 43dde4d09ef2833a7713241768be7ab91ba3d961 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Fri, 1 Jun 2018 15:00:41 -0400
Subject: [PATCH 15/18] Fix handling of percent-encoded spaces in Windows batch files (#31034)

If you invoke elasticsearch-plugin (or any other CLI script on Windows) with
a path that contains a percent-encoded space (or any other percent-encoded
character), the encoded sequence ends up being interpreted as a parameter
because the CLI scripts now shell into a common script (elasticsearch-cli).
For example, passing install --batch
file:/c:/encoded%20%space/analysis-icu-7.0.0.zip to elasticsearch-plugin
leads to the %20 being interpreted as %2 followed by a zero. Here, the %2 is
interpreted as the second parameter (--batch), and the InstallPluginCommand
class ends up seeing file:/c/encoded--batch0space/analysis-icu-7.0.0.zip as
the path, which does not exist. This commit addresses this by escaping the %*
that is used to pass the parameters to the common CLI script, so that the
common script sees the correct parameters without the %2 being substituted.
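To make the failure mode concrete: the plugin installer accepts file: URLs,
so the percent-encoding must reach the Java side intact, where standard URI
handling decodes %20 back into a space. A minimal illustrative sketch (not
code from this change; the path is hypothetical):

    import java.net.URI;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class PercentEncodingDemo {
        public static void main(String[] args) {
            // %20 in the file URI decodes back into a space in the path
            Path p = Paths.get(URI.create("file:///tmp/encoded%20space/analysis-icu-7.0.0.zip"));
            System.out.println(p); // /tmp/encoded space/analysis-icu-7.0.0.zip
        }
    }

If the batch layer has already turned %20 into "--batch0", no such decoding
can recover the original path.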
--- distribution/src/bin/elasticsearch-keystore.bat | 2 +- distribution/src/bin/elasticsearch-plugin.bat | 2 +- distribution/src/bin/elasticsearch-translog.bat | 2 +- x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat | 2 +- x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat | 2 +- x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat | 2 +- .../security/src/main/bin/elasticsearch-saml-metadata.bat | 2 +- .../security/src/main/bin/elasticsearch-setup-passwords.bat | 2 +- x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat | 2 +- x-pack/plugin/security/src/main/bin/elasticsearch-users.bat | 2 +- x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 9bd72a65745a9..380a3e501d57e 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -5,7 +5,7 @@ setlocal enableextensions call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.common.settings.KeyStoreCli ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index c9a8e9748f149..5d7b1d7a8283d 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -6,7 +6,7 @@ setlocal enableextensions set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.plugins.PluginCli ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 37d96bbed6c4e..9c4cefcf2fe64 100644 --- a/distribution/src/bin/elasticsearch-translog.bat +++ b/distribution/src/bin/elasticsearch-translog.bat @@ -5,7 +5,7 @@ setlocal enableextensions call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.index.translog.TranslogToolCli ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat index 8c8a0c69f5626..01f3c0f21cd77 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat index f898f885ce0a3..f8a5fd9880a62 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat index f9486979e6bc3..67faf2ea66afc 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -10,7 +10,7 @@ setlocal enableextensions set 
ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat index 4ddb8da3ff143..6cdd539a81da3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat index f380e5f55271f..e3ea134ae43b3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat index 1eff4aad8251e..570eef619ec28 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat index 7f7347d706ff5..2975fbe87b976 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.file.tool.UsersTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 37ca14dd094cc..281b06cf77b17 100644 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -10,7 +10,7 @@ setlocal enableextensions set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-watcher-env call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool ^ - %* ^ + %%* ^ || exit /b 1 endlocal From 3e609b96a3611855937942546b28a874e19a376d Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Fri, 1 Jun 2018 15:05:37 -0400 Subject: [PATCH 16/18] [DOCS] Make geoshape docs less memory hungry (#31014) Reduces shape size and precision in geo shape mapper examples to reduce amount of memory required to check docs. 
Fixes #23836 --- .../mapping/types/geo-shape.asciidoc | 28 ++++++------------- 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 43ad71e37073f..f83501a508d03 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -175,7 +175,7 @@ PUT /example "location": { "type": "geo_shape", "tree": "quadtree", - "precision": "1m" + "precision": "100m" } } } @@ -186,8 +186,8 @@ PUT /example // TESTSETUP This mapping maps the location field to the geo_shape type using the -quad_tree implementation and a precision of 1m. Elasticsearch translates -this into a tree_levels setting of 26. +quad_tree implementation and a precision of 100m. Elasticsearch translates +this into a tree_levels setting of 20. [float] ===== Performance considerations @@ -364,7 +364,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] The following is an example of a Polygon with a hole in WKT: @@ -376,7 +375,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] *IMPORTANT NOTE:* WKT does not enforce a specific order for vertices thus ambiguous polygons around the dateline and poles are possible. @@ -411,7 +409,7 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] +// TEST[catch:/mapper_parsing_exception/] An `orientation` parameter can be defined when setting the geo_shape mapping (see <>). This will define vertex order for the coordinate list on the mapped geo_shape field. It can also be overridden on each document. 
The following is an example for @@ -425,14 +423,12 @@ POST /example/doc "type" : "polygon", "orientation" : "clockwise", "coordinates" : [ - [ [-177.0, 10.0], [176.0, 15.0], [172.0, 0.0], [176.0, -15.0], [-177.0, -10.0], [-177.0, 10.0] ], - [ [178.2, 8.2], [-178.8, 8.2], [-180.8, -8.8], [178.2, 8.8] ] + [ [100.0, 0.0], [100.0, 1.0], [101.0, 1.0], [101.0, 0.0], [100.0, 0.0] ] ] } } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://www.geojson.org/geojson-spec.html#id5[MultiPoint] @@ -484,7 +480,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] The following is an example of a list of WKT linestrings: @@ -496,7 +491,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://www.geojson.org/geojson-spec.html#id7[MultiPolygon] @@ -518,7 +512,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] The following is an example of a list of WKT polygons (second polygon contains a hole): @@ -530,7 +523,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] @@ -557,7 +549,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] The following is an example of a collection of WKT geometry objects: @@ -569,7 +560,6 @@ POST /example/doc } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] @@ -585,12 +575,11 @@ POST /example/doc { "location" : { "type" : "envelope", - "coordinates" : [ [-45.0, 45.0], [45.0, -45.0] ] + "coordinates" : [ [100.0, 1.0], [101.0, 0.0] ] } } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] The following is an example of an envelope using the WKT BBOX format: @@ -600,11 +589,10 @@ The following is an example of an envelope using the WKT BBOX format: -------------------------------------------------- POST /example/doc { - "location" : "BBOX (-45.0, 45.0, 45.0, -45.0)" + "location" : "BBOX (100.0, 102.0, 2.0, 0.0)" } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== Circle @@ -618,7 +606,7 @@ POST /example/doc { "location" : { "type" : "circle", - "coordinates" : [-45.0, 45.0], + "coordinates" : [101.0, 1.0], "radius" : "100m" } } From b194568c0918dd1bf2df809e07d04ac16eac8407 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 1 Jun 2018 11:45:35 -0400 Subject: [PATCH 17/18] Introduce client feature tracking (#31020) This commit introduces the ability for a client to communicate to the server features that it can support and for these features to be used in influencing the decisions that the server makes when communicating with the client. To this end we carry the features from the client to the underlying stream as we carry the version of the client today. 
This enables us to enhance the logic where we make protocol decisions on the
basis of the version on the stream to also make protocol decisions on the
basis of the features on the stream. With such functionality, the client can
communicate to the server if it is a transport client, or if it has, for
example, X-Pack installed. This enables us to support rolling upgrades from
the OSS distribution to the default distribution without breaking client
connectivity, as we can now elect to serialize customs in the cluster state
depending on whether or not the client reports the feature capabilities
needed to understand those customs. This means that we avoid sending a client
pieces of the cluster state that it cannot understand. However, we want to
take care to always send the full cluster state during node-to-node
communication, as otherwise nodes would end up with differing understandings
of what is in the cluster state depending on which features they reported to
have. This is why, when deciding whether or not to write out a custom, we
always send the custom if the client is not a transport client, and otherwise
do not send the custom if the client is a transport client that does not
report the feature required by the custom.

Co-authored-by: Yannick Welsch
---
 .../transport/netty4/ESLoggingHandler.java    |   4 +
 .../client/transport/TransportClient.java     |  10 +-
 .../elasticsearch/cluster/ClusterState.java   |  66 +++-
 .../cluster/metadata/MetaData.java            |  13 +-
 .../common/io/stream/StreamOutput.java        |  26 ++
 .../common/settings/ClusterSettings.java      |   1 +
 .../PersistentTasksCustomMetaData.java        |   1 +
 .../org/elasticsearch/plugins/Plugin.java     |  13 +
 .../elasticsearch/plugins/PluginsService.java |  25 +-
 .../elasticsearch/transport/TcpTransport.java |  72 +++-
 .../transport/TcpTransportChannel.java        |  14 +-
 .../transport/TransportClientTests.java       |  21 +-
 .../elasticsearch/cluster/ClusterStateIT.java | 340 ++++++++++++++++++
 .../cluster/FeatureAwareTests.java            | 175 +++++++++
 .../transport/TcpTransportTests.java          |   1 +
 .../elasticsearch/test/ESIntegTestCase.java   |  98 ++++-
 .../test/client/RandomizingClient.java        |   4 +
 .../AbstractSimpleTransportTestCase.java      |   2 +-
 .../license/LicensesMetaData.java             |   3 +-
 .../xpack/core/XPackClientPlugin.java         |  27 +-
 .../elasticsearch/xpack/core/XPackPlugin.java |  25 +-
 .../xpack/core/ml/MlMetadata.java             |   3 +-
 .../core/security/authc/TokenMetaData.java    |   3 +-
 .../xpack/core/watcher/WatcherMetaData.java   |   3 +-
 24 files changed, 886 insertions(+), 64 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java
 create mode 100644 server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java

diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java
index 47a31f268a6a8..383fe6d2ac1b8 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java
@@ -104,6 +104,10 @@ else if (readableBytes >= TcpHeader.HEADER_SIZE) {
             try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
                 context.readHeaders(in);
             }
+            // now we decode the features
+            if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+                in.readStringArray();
+            }
             // now we can decode the action name
             sb.append(", action: ").append(in.readString());
         }
diff --git
a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index cfba3d58ee11f..1c810a3e040a8 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -97,6 +97,8 @@ public abstract class TransportClient extends AbstractClient { public static final Setting CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, Setting.Property.NodeScope); + public static final String TRANSPORT_CLIENT_FEATURE = "transport_client"; + private static PluginsService newPluginService(final Settings settings, Collection> plugins) { final Settings.Builder settingsBuilder = Settings.builder() .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval @@ -129,8 +131,12 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build(); } final PluginsService pluginsService = newPluginService(providedSettings, plugins); - final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).put(ThreadContext.PREFIX - + "." + "transport_client", true).build(); + final Settings settings = + Settings.builder() + .put(defaultSettings) + .put(pluginsService.updatedSettings()) + .put(TcpTransport.FEATURE_PREFIX + "." + TRANSPORT_CLIENT_FEATURE, true) + .build(); final List resourcesToClose = new ArrayList<>(); final ThreadPool threadPool = new ThreadPool(settings); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 2b991d1dc611a..6bc555eae0bd9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -61,6 +62,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.Set; /** @@ -90,7 +92,51 @@ public class ClusterState implements ToXContentFragment, Diffable public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); - public interface Custom extends NamedDiffable, ToXContentFragment { + /** + * An interface that implementors use when a class requires a client to maybe have a feature. + */ + public interface FeatureAware { + + /** + * An optional feature that is required for the client to have. + * + * @return an empty optional if no feature is required otherwise a string representing the required feature + */ + default Optional getRequiredFeature() { + return Optional.empty(); + } + + /** + * Tests whether or not the custom should be serialized. The criteria are: + *

+     * <ul>
+     * <li>the output stream must be at least the minimum supported version of the custom</li>
+     * <li>the output stream must have the feature required by the custom (if any) or not be a transport client</li>
+     * </ul>
+     * <p>
+ * That is, we only serialize customs to clients than can understand the custom based on the version of the client and the features + * that the client has. For transport clients we can be lenient in requiring a feature in which case we do not send the custom but + * for connected nodes we always require that the node has the required feature. + * + * @param out the output stream + * @param custom the custom to serialize + * @param the type of the custom + * @return true if the custom should be serialized and false otherwise + */ + static boolean shouldSerializeCustom(final StreamOutput out, final T custom) { + if (out.getVersion().before(custom.getMinimalSupportedVersion())) { + return false; + } + if (custom.getRequiredFeature().isPresent()) { + final String requiredFeature = custom.getRequiredFeature().get(); + // if it is a transport client we are lenient yet for a connected node it must have the required feature + return out.hasFeature(requiredFeature) || out.hasFeature(TransportClient.TRANSPORT_CLIENT_FEATURE) == false; + } + return true; + } + + } + + public interface Custom extends NamedDiffable, ToXContentFragment, FeatureAware { /** * Returns true iff this {@link Custom} is private to the cluster and should never be send to a client. @@ -99,6 +145,7 @@ public interface Custom extends NamedDiffable, ToXContentFragment { default boolean isPrivate() { return false; } + } private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); @@ -244,6 +291,15 @@ public String toString() { sb.append("isa_ids ").append(indexMetaData.inSyncAllocationIds(shard)).append("\n"); } } + if (metaData.customs().isEmpty() == false) { + sb.append("metadata customs:\n"); + for (final ObjectObjectCursor cursor : metaData.customs()) { + final String type = cursor.key; + final MetaData.Custom custom = cursor.value; + sb.append(TAB).append(type).append(": ").append(custom); + } + sb.append("\n"); + } sb.append(blocks()); sb.append(nodes()); sb.append(routingTable()); @@ -691,14 +747,14 @@ public void writeTo(StreamOutput out) throws IOException { blocks.writeTo(out); // filter out custom states not supported by the other node int numberOfCustoms = 0; - for (ObjectCursor cursor : customs.values()) { - if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerializeCustom(out, cursor.value)) { numberOfCustoms++; } } out.writeVInt(numberOfCustoms); - for (ObjectCursor cursor : customs.values()) { - if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerializeCustom(out, cursor.value)) { out.writeNamedWriteable(cursor.value); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index a569bb9a36e29..bb5e8e6fa48b2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -24,6 +24,8 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterState.FeatureAware; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import 
org.elasticsearch.cluster.DiffableUtils; @@ -117,9 +119,10 @@ public enum XContentContext { */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); - public interface Custom extends NamedDiffable, ToXContentFragment { + public interface Custom extends NamedDiffable, ToXContentFragment, ClusterState.FeatureAware { EnumSet context(); + } public static final Setting SETTING_READ_ONLY_SETTING = @@ -789,14 +792,14 @@ public void writeTo(StreamOutput out) throws IOException { } // filter out custom states not supported by the other node int numberOfCustoms = 0; - for (ObjectCursor cursor : customs.values()) { - if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerializeCustom(out, cursor.value)) { numberOfCustoms++; } } out.writeVInt(numberOfCustoms); - for (ObjectCursor cursor : customs.values()) { - if (out.getVersion().onOrAfter(cursor.value.getMinimalSupportedVersion())) { + for (final ObjectCursor cursor : customs.values()) { + if (FeatureAware.shouldSerializeCustom(out, cursor.value)) { out.writeNamedWriteable(cursor.value); } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 5c31fead1e074..8b05516c5a9a7 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -30,6 +30,8 @@ import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; @@ -58,10 +60,12 @@ import java.util.EnumMap; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; @@ -98,6 +102,7 @@ public abstract class StreamOutput extends OutputStream { } private Version version = Version.CURRENT; + private Set features = Collections.emptySet(); /** * The version of the node on the other side of this stream. @@ -113,6 +118,27 @@ public void setVersion(Version version) { this.version = version; } + /** + * Test if the stream has the specified feature. Features are used when serializing {@link ClusterState.Custom} or + * {@link MetaData.Custom}; see also {@link ClusterState.FeatureAware}. + * + * @param feature the feature to test + * @return true if the stream has the specified feature + */ + public boolean hasFeature(final String feature) { + return this.features.contains(feature); + } + + /** + * Set the features on the stream. See {@link StreamOutput#hasFeature(String)}. 
+ * + * @param features the features on the stream + */ + public void setFeatures(final Set features) { + assert this.features.isEmpty() : this.features; + this.features = Collections.unmodifiableSet(new HashSet<>(features)); + } + public long position() throws IOException { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 12ef7c553fdb0..d39440e66dfd0 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -383,6 +383,7 @@ public void apply(Settings value, Settings current, Settings previous) { ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, EsExecutors.PROCESSORS_SETTING, ThreadContext.DEFAULT_HEADERS_SETTING, + TcpTransport.DEFAULT_FEATURES_SETTING, Loggers.LOG_DEFAULT_LEVEL_SETTING, Loggers.LOG_LEVEL_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index 6d2c21a764ad5..bdee87cc77c51 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index 82c8bf1bbcb18..0ef703448b799 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -56,6 +57,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.UnaryOperator; /** @@ -79,6 +81,17 @@ */ public abstract class Plugin implements Closeable { + /** + * A feature exposed by the plugin. This should be used if a plugin exposes {@link ClusterState.Custom} or {@link MetaData.Custom}; see + * also {@link ClusterState.FeatureAware}. + * + * @return a feature set represented by this plugin, or the empty optional if the plugin does not expose cluster state or metadata + * customs + */ + protected Optional getFeature() { + return Optional.empty(); + } + /** * Node level guice modules. 
*/ diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 68a19bb9bca9b..5b64b5be6390d 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -41,8 +41,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexModule; import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.transport.TcpTransport; import java.io.IOException; import java.lang.reflect.Constructor; @@ -57,16 +59,17 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; @@ -196,6 +199,7 @@ private static void logPluginInfo(final List pluginInfos, final Stri public Settings updatedSettings() { Map foundSettings = new HashMap<>(); + final Map features = new TreeMap<>(); final Settings.Builder builder = Settings.builder(); for (Tuple plugin : plugins) { Settings settings = plugin.v2().additionalSettings(); @@ -207,6 +211,23 @@ public Settings updatedSettings() { } } builder.put(settings); + final Optional maybeFeature = plugin.v2().getFeature(); + if (maybeFeature.isPresent()) { + final String feature = maybeFeature.get(); + if (features.containsKey(feature)) { + final String message = String.format( + Locale.ROOT, + "duplicate feature [%s] in plugin [%s], already added in [%s]", + feature, + plugin.v1().getName(), + features.get(feature)); + throw new IllegalArgumentException(message); + } + features.put(feature, plugin.v1().getName()); + } + } + for (final String feature : features.keySet()) { + builder.put(TcpTransport.FEATURE_PREFIX + "." 
+ feature, true); } return builder.put(this.settings).build(); } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index d588ec9561af0..2aa3fcd7fc64a 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.Booleans; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -93,6 +94,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; @@ -185,6 +187,11 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); public static final int PING_DATA_SIZE = -1; + + public static final String FEATURE_PREFIX = "transport.features"; + public static final Setting DEFAULT_FEATURES_SETTING = Setting.groupSetting(FEATURE_PREFIX + ".", Setting.Property.NodeScope); + private final String[] features; + private final CircuitBreakerService circuitBreakerService; // package visibility for tests protected final ScheduledPing scheduledPing; @@ -236,6 +243,18 @@ public TcpTransport(String transportName, Settings settings, ThreadPool threadPo this.networkService = networkService; this.transportName = transportName; defaultConnectionProfile = buildDefaultConnectionProfile(settings); + final Settings defaultFeatures = DEFAULT_FEATURES_SETTING.get(settings); + if (defaultFeatures == null) { + this.features = new String[0]; + } else { + defaultFeatures.names().forEach(key -> { + if (Booleans.parseBoolean(defaultFeatures.get(key)) == false) { + throw new IllegalArgumentException("feature settings must have default [true] value"); + } + }); + // use a sorted set to present the features in a consistent order + this.features = new TreeSet<>(defaultFeatures.names()).toArray(new String[defaultFeatures.names().size()]); + } } static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { @@ -1093,6 +1112,9 @@ private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel cha stream.setVersion(version); threadPool.getThreadContext().writeTo(stream); + if (version.onOrAfter(Version.V_6_4_0)) { + stream.writeStringArray(features); + } stream.writeString(action); BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream); final TransportRequestOptions finalOptions = options; @@ -1125,15 +1147,22 @@ private void internalSendMessage(TcpChannel channel, BytesReference message, Sen * Sends back an error response to the caller via the given channel * * @param nodeVersion the caller node version + * @param features the caller features * @param channel the channel to send the response to * @param error the error to return * @param requestId the request ID this response replies to * @param action the action this response replies to */ - public void sendErrorResponse(Version nodeVersion, TcpChannel channel, final Exception error, final long requestId, - final String action) throws IOException { + 
public void sendErrorResponse( + final Version nodeVersion, + final Set features, + final TcpChannel channel, + final Exception error, + final long requestId, + final String action) throws IOException { try (BytesStreamOutput stream = new BytesStreamOutput()) { stream.setVersion(nodeVersion); + stream.setFeatures(features); RemoteTransportException tx = new RemoteTransportException( nodeName(), new TransportAddress(channel.getLocalAddress()), action, error); threadPool.getThreadContext().writeTo(stream); @@ -1153,15 +1182,28 @@ public void sendErrorResponse(Version nodeVersion, TcpChannel channel, final Exc /** * Sends the response to the given channel. This method should be used to send {@link TransportResponse} objects back to the caller. * - * @see #sendErrorResponse(Version, TcpChannel, Exception, long, String) for sending back errors to the caller + * @see #sendErrorResponse(Version, Set, TcpChannel, Exception, long, String) for sending back errors to the caller */ - public void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, - final String action, TransportResponseOptions options) throws IOException { - sendResponse(nodeVersion, channel, response, requestId, action, options, (byte) 0); + public void sendResponse( + final Version nodeVersion, + final Set features, + final TcpChannel channel, + final TransportResponse response, + final long requestId, + final String action, + final TransportResponseOptions options) throws IOException { + sendResponse(nodeVersion, features, channel, response, requestId, action, options, (byte) 0); } - private void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId, - final String action, TransportResponseOptions options, byte status) throws IOException { + private void sendResponse( + final Version nodeVersion, + final Set features, + final TcpChannel channel, + final TransportResponse response, + final long requestId, + final String action, + TransportResponseOptions options, + byte status) throws IOException { if (compress) { options = TransportResponseOptions.builder(options).withCompress(true).build(); } @@ -1175,6 +1217,7 @@ private void sendResponse(Version nodeVersion, TcpChannel channel, final Transpo } threadPool.getThreadContext().writeTo(stream); stream.setVersion(nodeVersion); + stream.setFeatures(features); BytesReference message = buildMessage(requestId, status, nodeVersion, response, stream); final TransportResponseOptions finalOptions = options; @@ -1475,13 +1518,19 @@ private void handleException(final TransportResponseHandler handler, Throwable e protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId, int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) throws IOException { + final Set features; + if (version.onOrAfter(Version.V_6_4_0)) { + features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(stream.readStringArray()))); + } else { + features = Collections.emptySet(); + } final String action = stream.readString(); transportService.onRequestReceived(requestId, action); TransportChannel transportChannel = null; try { if (TransportStatus.isHandshake(status)) { final VersionHandshakeResponse response = new VersionHandshakeResponse(getCurrentVersion()); - sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, + sendResponse(version, features, channel, response, 
requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, TransportStatus.setHandshake((byte) 0)); } else { final RequestHandlerRegistry reg = transportService.getRequestHandler(action); @@ -1493,7 +1542,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str } else { getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); } - transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, + transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, profileName, messageLengthBytes); final TransportRequest request = reg.newRequest(stream); request.remoteAddress(new TransportAddress(remoteAddress)); @@ -1504,7 +1553,8 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str } catch (Exception e) { // the circuit breaker tripped if (transportChannel == null) { - transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, 0); + transportChannel = + new TcpTransportChannel(this, channel, transportName, action, requestId, version, features, profileName, 0); } try { transportChannel.sendResponse(e); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java index eb4c244c7a920..1bf1d027329b5 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransportChannel.java @@ -16,16 +16,20 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.transport; import org.elasticsearch.Version; import java.io.IOException; +import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; public final class TcpTransportChannel implements TransportChannel { private final TcpTransport transport; private final Version version; + private final Set features; private final String action; private final long requestId; private final String profileName; @@ -34,9 +38,10 @@ public final class TcpTransportChannel implements TransportChannel { private final String channelType; private final TcpChannel channel; - TcpTransportChannel(TcpTransport transport, TcpChannel channel, String channelType, String action, - long requestId, Version version, String profileName, long reservedBytes) { + TcpTransportChannel(TcpTransport transport, TcpChannel channel, String channelType, String action, long requestId, Version version, + Set features, String profileName, long reservedBytes) { this.version = version; + this.features = features; this.channel = channel; this.transport = transport; this.action = action; @@ -59,7 +64,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException { try { - transport.sendResponse(version, channel, response, requestId, action, options); + transport.sendResponse(version, features, channel, response, requestId, action, options); } finally { release(false); } @@ -68,7 +73,7 @@ public void sendResponse(TransportResponse response, TransportResponseOptions op @Override public void sendResponse(Exception exception) throws IOException { try { - transport.sendErrorResponse(version, channel, exception, requestId, action); + transport.sendErrorResponse(version, features, 
channel, exception, requestId, action); } finally { release(true); } @@ -100,5 +105,6 @@ public Version getVersion() { public TcpChannel getChannel() { return channel; } + } diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java index 1830698d90c6f..1dc30e951b6d3 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.MockTransportClient; +import org.elasticsearch.transport.TcpTransport; import java.io.IOException; import java.util.Arrays; @@ -38,6 +39,8 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.object.HasToString.hasToString; public class TransportClientTests extends ESTestCase { @@ -64,13 +67,23 @@ public void testPluginNamedWriteablesRegistered() { } } - public void testDefaultHeaderContainsPlugins() { - Settings baseSettings = Settings.builder() + public void testSettingsContainsTransportClient() { + final Settings baseSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { - ThreadContext threadContext = client.threadPool().getThreadContext(); - assertEquals("true", threadContext.getHeader("transport_client")); + final Settings settings = TcpTransport.DEFAULT_FEATURES_SETTING.get(client.settings()); + assertThat(settings.keySet(), hasItem("transport_client")); + assertThat(settings.get("transport_client"), equalTo("true")); + } + } + + public void testDefaultHeader() { + final Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); + try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) { + final ThreadContext threadContext = client.threadPool().getThreadContext(); assertEquals("true", threadContext.getHeader("test")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java new file mode 100644 index 0000000000000..07a974a2ca771 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateIT.java @@ -0,0 +1,340 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexGraveyard; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.watcher.ResourceWatcherService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; + +/** + * This test suite sets up a situation where the cluster has two plugins installed (node, and node-and-transport-client), and a transport + * client only has node-and-transport-client plugin installed. Each of these plugins inject customs into the cluster state and we want to + * check that the client can de-serialize a cluster state response based on the fact that the response should not contain customs that the + * transport client does not understand based on the fact that it only presents the node-and-transport-client-feature. 
+ */ +@ESIntegTestCase.ClusterScope(scope = TEST) +public class ClusterStateIT extends ESIntegTestCase { + + public abstract static class Custom implements MetaData.Custom { + + private static final ParseField VALUE = new ParseField("value"); + + private final int value; + + int value() { + return value; + } + + Custom(final int value) { + this.value = value; + } + + Custom(final StreamInput in) throws IOException { + value = in.readInt(); + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(final MetaData.Custom previousState) { + return null; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeInt(value); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(VALUE.getPreferredName(), value); + return builder; + } + + } + + public static class NodeCustom extends Custom { + + public static final String TYPE = "node"; + + NodeCustom(final int value) { + super(value); + } + + NodeCustom(final StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Optional getRequiredFeature() { + return Optional.of("node"); + } + + } + + public static class NodeAndTransportClientCustom extends Custom { + + public static final String TYPE = "node-and-transport-client"; + + NodeAndTransportClientCustom(final int value) { + super(value); + } + + public NodeAndTransportClientCustom(final StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + /* + * This custom should always be returned yet we randomize whether it has a required feature that the client is expected to have + * versus not requiring any feature. We use a field to make the random choice exactly once. + */ + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + private final Optional requiredFeature = randomBoolean() ? 
Optional.empty() : Optional.of("node-and-transport-client"); + + @Override + public Optional getRequiredFeature() { + return requiredFeature; + } + + } + + public abstract static class CustomPlugin extends Plugin { + + private final List namedWritables = new ArrayList<>(); + private final List namedXContents = new ArrayList<>(); + + public CustomPlugin() { + registerBuiltinWritables(); + } + + protected void registerMetaDataCustom( + final String name, final Writeable.Reader reader, final CheckedFunction parser) { + namedWritables.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, name, reader)); + namedXContents.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(name), parser)); + } + + protected abstract void registerBuiltinWritables(); + + protected abstract String getType(); + + protected abstract Custom getInstance(); + + @Override + public List getNamedWriteables() { + return namedWritables; + } + + @Override + public List getNamedXContent() { + return namedXContents; + } + + private final AtomicBoolean installed = new AtomicBoolean(); + + @Override + public Collection createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry) { + clusterService.addListener(event -> { + final ClusterState state = event.state(); + if (state.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) { + return; + } + + final MetaData metaData = state.metaData(); + if (state.nodes().isLocalNodeElectedMaster()) { + if (metaData.custom(getType()) == null) { + if (installed.compareAndSet(false, true)) { + clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { + + @Override + public ClusterState execute(ClusterState currentState) { + if (currentState.custom(getType()) == null) { + final MetaData.Builder builder = MetaData.builder(currentState.metaData()); + builder.putCustom(getType(), getInstance()); + return ClusterState.builder(currentState).metaData(builder).build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + throw new AssertionError(e); + } + + }); + } + } + } + + }); + return Collections.emptyList(); + } + } + + public static class NodePlugin extends CustomPlugin { + + public Optional getFeature() { + return Optional.of("node"); + } + + static final int VALUE = randomInt(); + + @Override + protected void registerBuiltinWritables() { + registerMetaDataCustom( + NodeCustom.TYPE, + NodeCustom::new, + parser -> { + throw new IOException(new UnsupportedOperationException()); + }); + } + + @Override + protected String getType() { + return NodeCustom.TYPE; + } + + @Override + protected Custom getInstance() { + return new NodeCustom(VALUE); + } + + } + + public static class NodeAndTransportClientPlugin extends CustomPlugin { + + @Override + protected Optional getFeature() { + return Optional.of("node-and-transport-client"); + } + + static final int VALUE = randomInt(); + + @Override + protected void registerBuiltinWritables() { + registerMetaDataCustom( + NodeAndTransportClientCustom.TYPE, + NodeAndTransportClientCustom::new, + parser -> { + throw new IOException(new UnsupportedOperationException()); + }); + } + + @Override + protected String 
getType() { + return NodeAndTransportClientCustom.TYPE; + } + + @Override + protected Custom getInstance() { + return new NodeAndTransportClientCustom(VALUE); + } + + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(NodePlugin.class, NodeAndTransportClientPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.singletonList(NodeAndTransportClientPlugin.class); + } + + public void testOptionalCustoms() throws Exception { + // ensure that the customs are injected into the cluster state + assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeCustom.TYPE))); + assertBusy(() -> assertTrue(clusterService().state().metaData().customs().containsKey(NodeAndTransportClientCustom.TYPE))); + final ClusterStateResponse state = internalCluster().transportClient().admin().cluster().prepareState().get(); + final ImmutableOpenMap customs = state.getState().metaData().customs(); + final Set keys = new HashSet<>(Arrays.asList(customs.keys().toArray(String.class))); + assertThat(keys, hasItem(IndexGraveyard.TYPE)); + assertThat(keys, not(hasItem(NodeCustom.TYPE))); + assertThat(keys, hasItem(NodeAndTransportClientCustom.TYPE)); + final MetaData.Custom actual = customs.get(NodeAndTransportClientCustom.TYPE); + assertThat(actual, instanceOf(NodeAndTransportClientCustom.class)); + assertThat(((NodeAndTransportClientCustom)actual).value(), equalTo(NodeAndTransportClientPlugin.VALUE)); + } + +} diff --git a/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java new file mode 100644 index 0000000000000..696bf2f82faaa --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/FeatureAwareTests.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.Version; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterState.FeatureAware; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.Optional; + +import static org.elasticsearch.test.VersionUtils.randomVersionBetween; + +public class FeatureAwareTests extends ESTestCase { + + abstract static class Custom implements MetaData.Custom { + + private final Version version; + + Custom(final Version version) { + this.version = version; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(final MetaData.Custom previousState) { + return null; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + @Override + public Version getMinimalSupportedVersion() { + return version; + } + + } + + static class NoRequiredFeatureCustom extends Custom { + + NoRequiredFeatureCustom(final Version version) { + super(version); + } + + @Override + public String getWriteableName() { + return "no-required-feature"; + } + + } + + static class RequiredFeatureCustom extends Custom { + + RequiredFeatureCustom(final Version version) { + super(version); + } + + @Override + public String getWriteableName() { + return null; + } + + @Override + public Optional getRequiredFeature() { + return Optional.of("required-feature"); + } + + } + + public void testVersion() { + final Version version = VersionUtils.randomVersion(random()); + for (final Custom custom : Arrays.asList(new NoRequiredFeatureCustom(version), new RequiredFeatureCustom(version))) { + { + final BytesStreamOutput out = new BytesStreamOutput(); + final Version afterVersion = randomVersionBetween(random(), version, Version.CURRENT); + out.setVersion(afterVersion); + if (custom.getRequiredFeature().isPresent()) { + out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); + } + assertTrue(FeatureAware.shouldSerializeCustom(out, custom)); + } + { + final BytesStreamOutput out = new BytesStreamOutput(); + final Version beforeVersion = + randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(version)); + out.setVersion(beforeVersion); + if (custom.getRequiredFeature().isPresent() && randomBoolean()) { + out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); + } + assertFalse(FeatureAware.shouldSerializeCustom(out, custom)); + } + } + } + + public void testFeature() { + final Version version = VersionUtils.randomVersion(random()); + final Version afterVersion = randomVersionBetween(random(), version, Version.CURRENT); + final Custom custom = new RequiredFeatureCustom(version); + { + // the feature is present and the client is not a transport client + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(afterVersion); + assertTrue(custom.getRequiredFeature().isPresent()); + 
out.setFeatures(Collections.singleton(custom.getRequiredFeature().get())); + assertTrue(FeatureAware.shouldSerializeCustom(out, custom)); + } + { + // the feature is present and the client is a transport client + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(afterVersion); + assertTrue(custom.getRequiredFeature().isPresent()); + out.setFeatures(new HashSet<>(Arrays.asList(custom.getRequiredFeature().get(), TransportClient.TRANSPORT_CLIENT_FEATURE))); + assertTrue(FeatureAware.shouldSerializeCustom(out, custom)); + } + } + + public void testMissingFeature() { + final Version version = VersionUtils.randomVersion(random()); + final Version afterVersion = randomVersionBetween(random(), version, Version.CURRENT); + final Custom custom = new RequiredFeatureCustom(version); + { + // the feature is missing but we should serialize it anyway because the client is not a transport client + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(afterVersion); + assertTrue(FeatureAware.shouldSerializeCustom(out, custom)); + } + { + // the feature is missing and we should not serialize it because the client is a transport client + final BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(afterVersion); + out.setFeatures(Collections.singleton(TransportClient.TRANSPORT_CLIENT_FEATURE)); + assertFalse(FeatureAware.shouldSerializeCustom(out, custom)); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 059f82e36037c..e361dc7894308 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -224,6 +224,7 @@ public NodeChannels getConnection(DiscoveryNode node) { .streamInput(streamIn); } threadPool.getThreadContext().readHeaders(streamIn); + assertThat(streamIn.readStringArray(), equalTo(new String[0])); // features assertEquals("foobar", streamIn.readString()); Req readReq = new Req(""); readReq.readFrom(streamIn); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index a7fd6768064e9..cd2735a0f96b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -26,7 +26,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; import org.apache.lucene.search.Sort; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -68,12 +67,18 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import 
org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -105,6 +110,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -130,9 +136,11 @@ import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ScriptMetaData; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; @@ -1108,7 +1116,8 @@ protected void ensureClusterSizeConsistency() { protected void ensureClusterStateConsistency() throws IOException { if (cluster() != null && cluster().size() > 0) { final NamedWriteableRegistry namedWriteableRegistry = cluster().getNamedWriteableRegistry(); - ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + final Client masterClient = client(); + ClusterState masterClusterState = masterClient.admin().cluster().prepareState().all().get().getState(); byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); // remove local node reference masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null, namedWriteableRegistry); @@ -1124,16 +1133,37 @@ protected void ensureClusterStateConsistency() throws IOException { final int localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; // Check that the non-master node has the same version of the cluster state as the master and // that the master node matches the master (otherwise there is no requirement for the cluster state to match) - if (masterClusterState.version() == localClusterState.version() && masterId.equals(localClusterState.nodes().getMasterNodeId())) { + if (masterClusterState.version() == localClusterState.version() + && masterId.equals(localClusterState.nodes().getMasterNodeId())) { try { - assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); - // We cannot compare serialization bytes since serialization order of maps is not guaranteed - // but we can compare serialization sizes - they should be the same - assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize); - // Compare JSON serialization - assertNull("clusterstate JSON serialization does not match", differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); - } catch (AssertionError error) { - logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); + assertEquals("cluster state UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); + /* + * The cluster state received by the transport client 
can miss customs that the client does not understand. This + * means that we only expect equality in the cluster state including customs if the master client and the local + * client are of the same type (both or neither are transport clients). Otherwise, we can only assert equality + * modulo non-core customs. + */ + if (isTransportClient(masterClient) == isTransportClient(client)) { + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can compare serialization sizes - they should be the same + assertEquals("cluster state size does not match", masterClusterStateSize, localClusterStateSize); + // Compare JSON serialization + assertNull( + "cluster state JSON serialization does not match", + differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); + } else { + // remove non-core customs and compare the cluster states + assertNull( + "cluster state JSON serialization does not match (after removing some customs)", + differenceBetweenMapsIgnoringArrayOrder( + convertToMap(removePluginCustoms(masterClusterState)), + convertToMap(removePluginCustoms(localClusterState)))); + } + } catch (final AssertionError error) { + logger.error( + "Cluster state from master:\n{}\nLocal cluster state:\n{}", + masterClusterState.toString(), + localClusterState.toString()); throw error; } } @@ -1142,6 +1172,52 @@ protected void ensureClusterStateConsistency() throws IOException { } + /** + * Tests if the client is a transport client or wraps a transport client. + * + * @param client the client to test + * @return true if the client is a transport client or a wrapped transport client + */ + private boolean isTransportClient(final Client client) { + if (TransportClient.class.isAssignableFrom(client.getClass())) { + return true; + } else if (client instanceof RandomizingClient) { + return isTransportClient(((RandomizingClient) client).in()); + } + return false; + } + + private static final Set SAFE_METADATA_CUSTOMS = + Collections.unmodifiableSet( + new HashSet<>(Arrays.asList(IndexGraveyard.TYPE, IngestMetadata.TYPE, RepositoriesMetaData.TYPE, ScriptMetaData.TYPE))); + + private static final Set SAFE_CUSTOMS = + Collections.unmodifiableSet( + new HashSet<>(Arrays.asList(RestoreInProgress.TYPE, SnapshotDeletionsInProgress.TYPE, SnapshotsInProgress.TYPE))); + + /** + * Remove any customs except for customs that we know all clients understand. + * + * @param clusterState the cluster state to remove possibly-unknown customs from + * @return the cluster state with possibly-unknown customs removed + */ + private ClusterState removePluginCustoms(final ClusterState clusterState) { + final ClusterState.Builder builder = ClusterState.builder(clusterState); + clusterState.customs().keysIt().forEachRemaining(key -> { + if (SAFE_CUSTOMS.contains(key) == false) { + builder.removeCustom(key); + } + }); + final MetaData.Builder mdBuilder = MetaData.builder(clusterState.metaData()); + clusterState.metaData().customs().keysIt().forEachRemaining(key -> { + if (SAFE_METADATA_CUSTOMS.contains(key) == false) { + mdBuilder.removeCustom(key); + } + }); + builder.metaData(mdBuilder); + return builder.build(); + } + /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. 
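A note on the serialization rule these tests pin down. The sketch below reconstructs the feature-gated decision from the assertions in FeatureAwareTests and ClusterStateIT; it is an inference from this patch, not the verbatim FeatureAware.shouldSerializeCustom implementation:

static boolean shouldSerialize(final StreamOutput out, final MetaData.Custom custom) {
    // a destination older than the custom's minimal supported version never gets it
    if (out.getVersion().before(custom.getMinimalSupportedVersion())) {
        return false;
    }
    if (custom.getRequiredFeature().isPresent()) {
        final String required = custom.getRequiredFeature().get();
        // transport clients advertise TRANSPORT_CLIENT_FEATURE in the stream features;
        // nodes do not, and a node is assumed to understand every custom of its version
        final boolean transportClient = out.getFeatures().contains(TransportClient.TRANSPORT_CLIENT_FEATURE);
        return out.getFeatures().contains(required) || transportClient == false;
    }
    return true;
}

This matches testMissingFeature below: a missing feature suppresses the custom only when the destination identifies itself as a transport client.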
diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java index b144898d643d0..6d2b95ea30850 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java @@ -94,4 +94,8 @@ public String toString() { return "randomized(" + super.toString() + ")"; } + public Client in() { + return super.in(); + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index fa03d4077045e..2cc6ec5690b07 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.Constants; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -43,6 +42,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java index 56475de123f3c..d9f7068b2181e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetaData.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.license.License.OperationMode; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.EnumSet; @@ -23,7 +24,7 @@ /** * Contains metadata about registered licenses */ -public class LicensesMetaData extends AbstractNamedDiffable implements MetaData.Custom, +public class LicensesMetaData extends AbstractNamedDiffable implements XPackPlugin.XPackMetaDataCustom, MergableCustomMetaData { public static final String TYPE = "licenses"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 0b22cd86fe6a0..a96de96fd4f44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.indices.breaker.CircuitBreakerService; import 
org.elasticsearch.license.DeleteLicenseAction; @@ -28,6 +27,7 @@ import org.elasticsearch.license.PostStartBasicAction; import org.elasticsearch.license.PostStartTrialAction; import org.elasticsearch.license.PutLicenseAction; +import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; @@ -61,7 +61,6 @@ import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetFiltersAction; import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction; -import org.elasticsearch.xpack.core.ml.action.MlInfoAction; import org.elasticsearch.xpack.core.ml.action.GetJobsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction; @@ -69,6 +68,7 @@ import org.elasticsearch.xpack.core.ml.action.GetRecordsAction; import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; import org.elasticsearch.xpack.core.ml.action.KillProcessAction; +import org.elasticsearch.xpack.core.ml.action.MlInfoAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.PersistJobAction; import org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction; @@ -91,7 +91,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; -import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction; @@ -133,6 +132,8 @@ import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; +import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; @@ -143,18 +144,25 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; -import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.Supplier; public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin { + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + static Optional X_PACK_FEATURE = Optional.of("x-pack"); + + @Override + protected Optional getFeature() { + return X_PACK_FEATURE; + } + private final Settings settings; public XPackClientPlugin(final Settings settings) { @@ -185,11 +193,10 @@ public Settings additionalSettings() { static Settings additionalSettings(final Settings 
settings, final boolean enabled, final boolean transportClientMode) { if (enabled && transportClientMode) { - final Settings.Builder builder = Settings.builder(); - builder.put(SecuritySettings.addTransportSettings(settings)); - builder.put(SecuritySettings.addUserSettings(settings)); - builder.put(ThreadContext.PREFIX + "." + "has_xpack", true); - return builder.build(); + return Settings.builder() + .put(SecuritySettings.addTransportSettings(settings)) + .put(SecuritySettings.addUserSettings(settings)) + .build(); } else { return Settings.EMPTY; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 9568a36551c83..602f4bdbc079b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -59,19 +59,15 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; -import javax.security.auth.DestroyFailedException; - -import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.security.AccessController; -import java.security.GeneralSecurityException; import java.security.PrivilegedAction; import java.time.Clock; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.List; +import java.util.Optional; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -316,4 +312,23 @@ public static Path resolveConfigFile(Environment env, String name) { } return config; } + + public interface XPackClusterStateCustom extends ClusterState.Custom { + + @Override + default Optional getRequiredFeature() { + return XPackClientPlugin.X_PACK_FEATURE; + } + + } + + public interface XPackMetaDataCustom extends MetaData.Custom { + + @Override + default Optional getRequiredFeature() { + return XPackClientPlugin.X_PACK_FEATURE; + } + + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 6af323f1510e4..861f386a90966 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -27,6 +27,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; @@ -53,7 +54,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -public class MlMetadata implements MetaData.Custom { +public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { private static final ParseField JOBS_FIELD = new ParseField("jobs"); private static final ParseField DATAFEEDS_FIELD = new ParseField("datafeeds"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java index 6bd6228f2efe1..46111b9b16cd1 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java @@ -12,13 +12,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.List; -public final class TokenMetaData extends AbstractNamedDiffable implements ClusterState.Custom { +public final class TokenMetaData extends AbstractNamedDiffable implements XPackPlugin.XPackClusterStateCustom { /** * The type of {@link ClusterState} data. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java index 3a490f08b79e5..9f014dee843c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetaData.java @@ -13,12 +13,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.EnumSet; import java.util.Objects; -public class WatcherMetaData extends AbstractNamedDiffable implements MetaData.Custom { +public class WatcherMetaData extends AbstractNamedDiffable implements XPackPlugin.XPackMetaDataCustom { public static final String TYPE = "watcher"; From 0215c4c430699ee885782a4aff55916daf3000d1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 1 Jun 2018 17:11:03 -0400 Subject: [PATCH 18/18] Adjust BWC version on client features This commit adjusts the BWC version on client features in 6.x to 6.3.0 after the functionality was backported to the 6.3 branch. 
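As a reminder of the wire-format pattern being re-gated, the writer and the reader of the features array must agree on the exact version cut-over. The snippet restates the two hunks that follow side by side, with the surrounding methods elided:

// writer side (sendRequestToChannel): peers on or after the gate receive the array
if (version.onOrAfter(Version.V_6_3_0)) {
    stream.writeStringArray(features);
}

// reader side (handleRequest): the gate must match exactly, otherwise the stream
// desynchronizes and the fields that follow are read from the wrong bytes
final Set<String> features;
if (version.onOrAfter(Version.V_6_3_0)) {
    features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(stream.readStringArray())));
} else {
    features = Collections.emptySet();
}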
--- .../org/elasticsearch/transport/netty4/ESLoggingHandler.java | 2 +- .../main/java/org/elasticsearch/transport/TcpTransport.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java index 383fe6d2ac1b8..5ee79834740d7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/ESLoggingHandler.java @@ -105,7 +105,7 @@ else if (readableBytes >= TcpHeader.HEADER_SIZE) { context.readHeaders(in); } // now we decode the features - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { in.readStringArray(); } // now we can decode the action name diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 2aa3fcd7fc64a..9f7dba5651d6f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -1112,7 +1112,7 @@ private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel cha stream.setVersion(version); threadPool.getThreadContext().writeTo(stream); - if (version.onOrAfter(Version.V_6_4_0)) { + if (version.onOrAfter(Version.V_6_3_0)) { stream.writeStringArray(features); } stream.writeString(action); @@ -1519,7 +1519,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status) throws IOException { final Set features; - if (version.onOrAfter(Version.V_6_4_0)) { + if (version.onOrAfter(Version.V_6_3_0)) { features = Collections.unmodifiableSet(new TreeSet<>(Arrays.asList(stream.readStringArray()))); } else { features = Collections.emptySet();
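End to end, the feature plumbing in this series composes as sketched below. The class names are hypothetical and only illustrate the two override points the series introduces, Plugin#getFeature on the advertising side and FeatureAware#getRequiredFeature on the gating side; the fixture classes in ClusterStateIT above show the same shape in test form:

// hypothetical plugin: its feature name is collected into
// TcpTransport.DEFAULT_FEATURES_SETTING and sent with every request
public class FrobPlugin extends Plugin {

    @Override
    protected Optional<String> getFeature() {
        return Optional.of("frob");
    }

}

// hypothetical custom: requiring the same feature means nodes withhold it
// from transport clients that do not ship FrobPlugin
public class FrobMetaData implements MetaData.Custom {

    public static final String TYPE = "frob";

    @Override
    public String getWriteableName() {
        return TYPE;
    }

    @Override
    public Optional<String> getRequiredFeature() {
        return Optional.of("frob");
    }

    @Override
    public Version getMinimalSupportedVersion() {
        return Version.V_6_3_0; // first version that can read this custom
    }

    @Override
    public EnumSet<MetaData.XContentContext> context() {
        return MetaData.ALL_CONTEXTS;
    }

    @Override
    public Diff<MetaData.Custom> diff(final MetaData.Custom previousState) {
        return null; // fixture-style no-op, mirroring the tests above
    }

    @Override
    public void writeTo(final StreamOutput out) throws IOException {
        // nothing beyond the type to serialize in this sketch
    }

    @Override
    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
        return builder;
    }

}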