diff --git a/build.gradle b/build.gradle
index ec81047e3e6c9..9bb08cf29dbbc 100644
--- a/build.gradle
+++ b/build.gradle
@@ -53,9 +53,23 @@ subprojects {
   description = "Elasticsearch subproject ${project.path}"
 }
 
+apply plugin: 'nebula.info-scm'
+String licenseCommit
+if (VersionProperties.elasticsearch.toString().endsWith('-SNAPSHOT')) {
+  licenseCommit = scminfo.change ?: "master" // leniency for non-git builds
+} else {
+  licenseCommit = "v${version}"
+}
+String elasticLicenseUrl = "https://raw.githubusercontent.com/elastic/elasticsearch/${licenseCommit}/licenses/ELASTIC-LICENSE.txt"
+
 subprojects {
+  // Default to the apache license
   project.ext.licenseName = 'The Apache Software License, Version 2.0'
   project.ext.licenseUrl = 'http://www.apache.org/licenses/LICENSE-2.0.txt'
+
+  // But stick the Elastic license url in project.ext so we can get it if we need to switch to it
+  project.ext.elasticLicenseUrl = elasticLicenseUrl
+
   // we only use maven publish to add tasks for pom generation
   plugins.withType(MavenPublishPlugin).whenPluginAdded {
     publishing {
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 7aedd395b93b5..d89ffa78ed852 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.4.0-snapshot-0a7c3f462f
+lucene = 7.4.0-snapshot-518d303506
 
 # optional dependencies
 spatial4j = 0.7
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index 49a84146dc8f4..2925062e0e75b 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.client;
 
 import com.fasterxml.jackson.core.JsonParseException;
+
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
@@ -607,7 +608,7 @@ public void testDefaultNamedXContents() {
 
     public void testProvidedNamedXContents() {
         List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
-        assertEquals(7, namedXContents.size());
+        assertEquals(8, namedXContents.size());
         Map<Class<?>, Integer> categories = new HashMap<>();
         List<String> names = new ArrayList<>();
         for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
@@ -625,9 +626,10 @@ public void testProvidedNamedXContents() {
         assertTrue(names.contains(PrecisionAtK.NAME));
         assertTrue(names.contains(DiscountedCumulativeGain.NAME));
         assertTrue(names.contains(MeanReciprocalRank.NAME));
-        assertEquals(Integer.valueOf(2), categories.get(MetricDetail.class));
+        assertEquals(Integer.valueOf(3), categories.get(MetricDetail.class));
         assertTrue(names.contains(PrecisionAtK.NAME));
         assertTrue(names.contains(MeanReciprocalRank.NAME));
+        assertTrue(names.contains(DiscountedCumulativeGain.NAME));
     }
 
     private static class TrackingActionListener implements ActionListener<Response> {
diff --git a/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java
new file mode 100644
index 0000000000000..e4bb43458648b
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/HasAttributeNodeSelector.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A {@link NodeSelector} that selects nodes that have a particular value
+ * for an attribute.
+ */
+public final class HasAttributeNodeSelector implements NodeSelector {
+    private final String key;
+    private final String value;
+
+    public HasAttributeNodeSelector(String key, String value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    @Override
+    public void select(Iterable<Node> nodes) {
+        Iterator<Node> itr = nodes.iterator();
+        while (itr.hasNext()) {
+            Map<String, List<String>> allAttributes = itr.next().getAttributes();
+            if (allAttributes == null) continue;
+            List<String> values = allAttributes.get(key);
+            if (values == null || false == values.contains(value)) {
+                itr.remove();
+            }
+        }
+    }
+
+    @Override
+    public String toString() {
+        return key + "=" + value;
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Node.java b/client/rest/src/main/java/org/elasticsearch/client/Node.java
index d66d0773016e6..f180b52927545 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/Node.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/Node.java
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.client;
 
+import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
@@ -52,13 +54,18 @@ public class Node {
      * if we don't know what roles the node has.
      */
    private final Roles roles;
+    /**
+     * Attributes declared on the node.
+     */
+    private final Map<String, List<String>> attributes;
 
     /**
      * Create a {@linkplain Node} with metadata. All parameters except
      * {@code host} are nullable and implementations of {@link NodeSelector}
      * need to decide what to do in their absence.
      */
-    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
+    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version,
+            Roles roles, Map<String, List<String>> attributes) {
         if (host == null) {
             throw new IllegalArgumentException("host cannot be null");
         }
@@ -67,13 +74,14 @@ public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version
         this.name = name;
         this.version = version;
         this.roles = roles;
+        this.attributes = attributes;
     }
 
     /**
      * Create a {@linkplain Node} without any metadata.
      */
     public Node(HttpHost host) {
-        this(host, null, null, null, null);
+        this(host, null, null, null, null, null);
     }
 
     /**
@@ -115,6 +123,13 @@ public Roles getRoles() {
         return roles;
     }
 
+    /**
+     * Attributes declared on the node.
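+     * Keys are the attribute names and each value is the list of values
+     * configured for that attribute on the node.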
+     */
+    public Map<String, List<String>> getAttributes() {
+        return attributes;
+    }
+
     @Override
     public String toString() {
         StringBuilder b = new StringBuilder();
@@ -131,6 +146,9 @@ public String toString() {
         if (roles != null) {
             b.append(", roles=").append(roles);
         }
+        if (attributes != null) {
+            b.append(", attributes=").append(attributes);
+        }
         return b.append(']').toString();
     }
 
@@ -144,12 +162,13 @@ public boolean equals(Object obj) {
                 && Objects.equals(boundHosts, other.boundHosts)
                 && Objects.equals(name, other.name)
                 && Objects.equals(version, other.version)
-                && Objects.equals(roles, other.roles);
+                && Objects.equals(roles, other.roles)
+                && Objects.equals(attributes, other.attributes);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(host, boundHosts, name, version, roles);
+        return Objects.hash(host, boundHosts, name, version, roles, attributes);
     }
 
     /**
diff --git a/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java
new file mode 100644
index 0000000000000..8a7c12e8c62de
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/HasAttributeNodeSelectorTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node.Roles;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static org.junit.Assert.assertEquals;
+
+public class HasAttributeNodeSelectorTests extends RestClientTestCase {
+    public void testHasAttribute() {
+        Node hasAttributeValue = dummyNode(singletonMap("attr", singletonList("val")));
+        Node hasAttributeButNotValue = dummyNode(singletonMap("attr", singletonList("notval")));
+        Node hasAttributeValueInList = dummyNode(singletonMap("attr", Arrays.asList("val", "notval")));
+        Node notHasAttribute = dummyNode(singletonMap("notattr", singletonList("val")));
+        List<Node> nodes = new ArrayList<>();
+        nodes.add(hasAttributeValue);
+        nodes.add(hasAttributeButNotValue);
+        nodes.add(hasAttributeValueInList);
+        nodes.add(notHasAttribute);
+        List<Node> expected = new ArrayList<>();
+        expected.add(hasAttributeValue);
+        expected.add(hasAttributeValueInList);
+        new HasAttributeNodeSelector("attr", "val").select(nodes);
+        assertEquals(expected, nodes);
+    }
+
+    private static Node dummyNode(Map<String, List<String>> attributes) {
+        return new Node(new HttpHost("dummy"), Collections.emptySet(),
+                randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
+                new Roles(randomBoolean(), randomBoolean(), randomBoolean()),
+                attributes);
+    }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java
index d9df001ad437e..868ccdcab757d 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/NodeSelectorTests.java
@@ -63,9 +63,10 @@ public void testNotMasterOnly() {
         assertEquals(expected, nodes);
     }
 
-    private Node dummyNode(boolean master, boolean data, boolean ingest) {
+    private static Node dummyNode(boolean master, boolean data, boolean ingest) {
         return new Node(new HttpHost("dummy"), Collections.emptySet(),
                 randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
-                new Roles(master, data, ingest));
+                new Roles(master, data, ingest),
+                Collections.<String, List<String>>emptyMap());
     }
 }
diff --git a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java
index c6d60415b88dc..9eeeb1144f485 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/NodeTests.java
@@ -23,49 +23,67 @@
 import org.elasticsearch.client.Node.Roles;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 
 import static java.util.Collections.singleton;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class NodeTests extends RestClientTestCase {
     public void testToString() {
+        Map<String, List<String>> attributes = new HashMap<>();
+        attributes.put("foo", singletonList("bar"));
+        attributes.put("baz", Arrays.asList("bort", "zoom"));
         assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString());
+        assertEquals("[host=http://1, attributes={foo=[bar], baz=[bort, zoom]}]",
+            new Node(new 
HttpHost("1"), null, null, null, null, attributes).toString()); assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"), - null, null, null, new Roles(true, true, true)).toString()); + null, null, null, new Roles(true, true, true), null).toString()); assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"), - null, null, "ver", null).toString()); + null, null, "ver", null, null).toString()); assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"), - null, "nam", null, null).toString()); + null, "nam", null, null, null).toString()); assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"), - new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString()); - assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]", + new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null, null).toString()); + assertEquals( + "[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m, attributes={foo=[bar], baz=[bort, zoom]}]", new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), - "nam", "ver", new Roles(true, false, false)).toString()); + "nam", "ver", new Roles(true, false, false), attributes).toString()); } public void testEqualsAndHashCode() { HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5)); Node node = new Node(host, - randomBoolean() ? null : singleton(host), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : randomAsciiAlphanumOfLength(5), - randomBoolean() ? null : new Roles(true, true, true)); + randomBoolean() ? null : singleton(host), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : randomAsciiAlphanumOfLength(5), + randomBoolean() ? null : new Roles(true, true, true), + randomBoolean() ? 
null : singletonMap("foo", singletonList("bar"))); assertFalse(node.equals(null)); assertTrue(node.equals(node)); assertEquals(node.hashCode(), node.hashCode()); - Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles()); + Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), + node.getRoles(), node.getAttributes()); assertTrue(node.equals(copy)); assertEquals(node.hashCode(), copy.hashCode()); assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(), - node.getName(), node.getVersion(), node.getRoles()))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))), - node.getName(), node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles()))); - assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false)))); + node.getName(), node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", + node.getVersion(), node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion() + "changed", node.getRoles(), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), new Roles(false, false, false), node.getAttributes()))); + assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), + node.getVersion(), node.getRoles(), singletonMap("bort", singletonList("bing"))))); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 92a960090ad6a..d09741ea25b6c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -42,7 +42,9 @@ import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -214,7 +216,8 @@ public void testNodeSelector() throws IOException { restClient.performRequest(request); fail("expected to fail to connect"); } catch (ConnectException e) { - assertEquals("Connection refused", e.getMessage()); + // This is different in windows and linux but this matches both. 
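+                // (Windows, for example, may append extra detail such as ": connect".)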
+ assertThat(e.getMessage(), startsWith("Connection refused")); } } else { Response response = restClient.performRequest(request); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index eb591f4ccff3a..d04b3cbb7554e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -342,7 +342,7 @@ public void testSetNodes() throws IOException { List newNodes = new ArrayList<>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false); - newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles)); + newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles, null)); } restClient.setNodes(newNodes); int rounds = between(1, 10); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 01f6f308f6227..04742ccab4f32 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -341,9 +341,9 @@ public void testNullPath() throws IOException { } public void testSelectHosts() throws IOException { - Node n1 = new Node(new HttpHost("1"), null, null, "1", null); - Node n2 = new Node(new HttpHost("2"), null, null, "2", null); - Node n3 = new Node(new HttpHost("3"), null, null, "3", null); + Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); + Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); + Node n3 = new Node(new HttpHost("3"), null, null, "3", null, null); NodeSelector not1 = new NodeSelector() { @Override diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 0cc41b078b8d6..d3a0202747d25 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,6 +36,7 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; @@ -190,11 +191,20 @@ public void onFailure(Exception exception) { //tag::rest-client-options-set-singleton request.setOptions(COMMON_OPTIONS); //end::rest-client-options-set-singleton - //tag::rest-client-options-customize - RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); - options.addHeader("cats", "knock things off of other things"); - request.setOptions(options); - //end::rest-client-options-customize + { + //tag::rest-client-options-customize-header + RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); + options.addHeader("cats", "knock things off of other things"); + request.setOptions(options); + //end::rest-client-options-customize-header + } + { + //tag::rest-client-options-customize-attribute + RequestOptions.Builder options = 
COMMON_OPTIONS.toBuilder();
+                options.setNodeSelector(new HasAttributeNodeSelector("rack", "c12")); // <1>
+                request.setOptions(options);
+                //end::rest-client-options-customize-attribute
+            }
         }
         {
             HttpEntity[] documents = new HttpEntity[10];
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java
index da7ef4700fd2f..5c947f5625ba0 100644
--- a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/ElasticsearchNodesSniffer.java
@@ -36,12 +36,18 @@
 import java.io.InputStream;
 import java.net.URI;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
+import static java.util.Collections.singletonList;
+import static java.util.Collections.unmodifiableList;
+import static java.util.Collections.unmodifiableMap;
+
 /**
  * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back.
  * Compatible with elasticsearch 2.x+.
@@ -138,16 +144,19 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th
         Set<HttpHost> boundHosts = new HashSet<>();
         String name = null;
         String version = null;
-        String fieldName = null;
-        // Used to read roles from 5.0+
+        /*
+         * Multi-valued attributes come with the key flattened as
+         * `real_key.index` and we unflatten them after reading because
+         * we can't rely on the order in which they arrive.
+         */
+        final Map<String, String> protoAttributes = new HashMap<>();
+
         boolean sawRoles = false;
         boolean master = false;
         boolean data = false;
         boolean ingest = false;
-        // Used to read roles from 2.x
-        Boolean masterAttribute = null;
-        Boolean dataAttribute = null;
-        boolean clientAttribute = false;
+
+        String fieldName = null;
         while (parser.nextToken() != JsonToken.END_OBJECT) {
             if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
                 fieldName = parser.getCurrentName();
@@ -170,13 +179,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th
                 }
             } else if ("attributes".equals(fieldName)) {
                 while (parser.nextToken() != JsonToken.END_OBJECT) {
-                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) {
-                        masterAttribute = toBoolean(parser.getValueAsString());
-                    } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) {
-                        dataAttribute = toBoolean(parser.getValueAsString());
-                    } else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) {
-                        clientAttribute = toBoolean(parser.getValueAsString());
-                    } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+                    if (parser.getCurrentToken() == JsonToken.VALUE_STRING) {
+                        String oldValue = protoAttributes.put(parser.getCurrentName(), parser.getValueAsString());
+                        if (oldValue != null) {
+                            throw new IOException("repeated attribute key [" + parser.getCurrentName() + "]");
+                        }
+                    } else {
                         parser.skipChildren();
                     }
                 }
@@ -216,21 +224,74 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th
         if (publishedHost == null) {
             logger.debug("skipping node [" + nodeId + "] with http disabled");
             return null;
-        } else {
-            logger.trace("adding node [" + nodeId + "]");
-            if (version.startsWith("2.")) {
-                /*
-                 * 2.x doesn't send roles, instead we try to read them from
-                 * attributes.
-                 */
-                master = masterAttribute == null ? false == clientAttribute : masterAttribute;
-                data = dataAttribute == null ? false == clientAttribute : dataAttribute;
-            } else {
-                assert sawRoles : "didn't see roles for [" + nodeId + "]";
+        }
+
+        Map<String, List<String>> realAttributes = new HashMap<>(protoAttributes.size());
+        List<String> keys = new ArrayList<>(protoAttributes.keySet());
+        for (String key : keys) {
+            if (key.endsWith(".0")) {
+                String realKey = key.substring(0, key.length() - 2);
+                List<String> values = new ArrayList<>();
+                int i = 0;
+                while (true) {
+                    String value = protoAttributes.remove(realKey + "." + i);
+                    if (value == null) {
+                        break;
+                    }
+                    values.add(value);
+                    i++;
+                }
+                realAttributes.put(realKey, unmodifiableList(values));
             }
-            assert boundHosts.contains(publishedHost) :
-                    "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
-            return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest));
+        }
+        for (Map.Entry<String, String> entry : protoAttributes.entrySet()) {
+            realAttributes.put(entry.getKey(), singletonList(entry.getValue()));
+        }
+
+        if (version.startsWith("2.")) {
+            /*
+             * 2.x doesn't send roles, instead we try to read them from
+             * attributes.
+             */
+            boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false);
+            Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null);
+            Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null);
+            master = masterAttribute == null ? false == clientAttribute : masterAttribute;
+            data = dataAttribute == null ? false == clientAttribute : dataAttribute;
+        } else {
+            assert sawRoles : "didn't see roles for [" + nodeId + "]";
+        }
+        assert boundHosts.contains(publishedHost) :
+                "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
+        logger.trace("adding node [" + nodeId + "]");
+        return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest),
+                unmodifiableMap(realAttributes));
+    }
+
+    /**
+     * Returns {@code defaultValue} if the attribute didn't come back,
+     * {@code true} or {@code false} if it did come back as
+     * either of those, or throws an IOException if the attribute
+     * came back in a strange way.
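+     * (for example, with more than one value or with a value other than
+     * {@code true} or {@code false}).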
+     */
+    private static Boolean v2RoleAttributeValue(Map<String, List<String>> attributes,
+            String name, Boolean defaultValue) throws IOException {
+        List<String> valueList = attributes.remove(name);
+        if (valueList == null) {
+            return defaultValue;
+        }
+        if (valueList.size() != 1) {
+            throw new IOException("expected only a single attribute value for [" + name + "] but got "
+                    + valueList);
+        }
+        switch (valueList.get(0)) {
+        case "true":
+            return true;
+        case "false":
+            return false;
+        default:
+            throw new IOException("expected [" + name + "] to be either [true] or [false] but was ["
+                    + valueList.get(0) + "]");
         }
     }
 
@@ -248,15 +309,4 @@ public String toString() {
             return name;
         }
     }
-
-    private static boolean toBoolean(String string) {
-        switch (string) {
-        case "true":
-            return true;
-        case "false":
-            return false;
-        default:
-            throw new IllegalArgumentException("[" + string + "] is not a valid boolean");
-        }
-    }
 }
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java
index 712a836a17b8a..edc7330c13074 100644
--- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferParseTests.java
@@ -30,14 +30,18 @@
 import java.io.IOException;
 import java.io.InputStream;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import com.fasterxml.jackson.core.JsonFactory;
 
-import static org.hamcrest.Matchers.hasItem;
+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.hasSize;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 /**
@@ -53,10 +57,14 @@ private void checkFile(String file, Node... expected) throws IOException {
         try {
             HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);
             List<Node> nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory());
-            // Use these assertions because the error messages are nicer than hasItems.
+            /*
+             * Use these assertions because the error messages are nicer
+             * than hasItems and we know the results are in order because
+             * that is how we generated the file.
+             */
             assertThat(nodes, hasSize(expected.length));
-            for (Node expectedNode : expected) {
-                assertThat(nodes, hasItem(expectedNode));
+            for (int i = 0; i < expected.length; i++) {
+                assertEquals(expected[i], nodes.get(i));
             }
         } finally {
             in.close();
@@ -66,13 +74,13 @@ private void checkFile(String file, Node... 
expected) throws IOException { public void test2x() throws IOException { checkFile("2.0.0_nodes_http.json", node(9200, "m1", "2.0.0", true, false, false), - node(9202, "m2", "2.0.0", true, true, false), - node(9201, "m3", "2.0.0", true, false, false), - node(9205, "d1", "2.0.0", false, true, false), + node(9201, "m2", "2.0.0", true, true, false), + node(9202, "m3", "2.0.0", true, false, false), + node(9203, "d1", "2.0.0", false, true, false), node(9204, "d2", "2.0.0", false, true, false), - node(9203, "d3", "2.0.0", false, true, false), - node(9207, "c1", "2.0.0", false, false, false), - node(9206, "c2", "2.0.0", false, false, false)); + node(9205, "d3", "2.0.0", false, true, false), + node(9206, "c1", "2.0.0", false, false, false), + node(9207, "c2", "2.0.0", false, false, false)); } public void test5x() throws IOException { @@ -104,6 +112,10 @@ private Node node(int port, String name, String version, boolean master, boolean Set boundHosts = new HashSet<>(2); boundHosts.add(host); boundHosts.add(new HttpHost("[::1]", port)); - return new Node(host, boundHosts, name, version, new Roles(master, data, ingest)); + Map> attributes = new HashMap<>(); + attributes.put("dummy", singletonList("everyone_has_me")); + attributes.put("number", singletonList(name.substring(1))); + attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1))); + return new Node(host, boundHosts, name, version, new Roles(master, data, ingest), attributes); } } diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java index 260832ca90e17..3d2a74685afcd 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchNodesSnifferTests.java @@ -200,9 +200,21 @@ private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme } } + int numAttributes = between(0, 5); + Map> attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + int numValues = frequently() ? 
1 : between(2, 5); + List values = new ArrayList<>(); + for (int v = 0; v < numValues; v++) { + values.add(j + "value" + v); + } + attributes.put("attr" + j, values); + } + Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5), - new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean())); + new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()), + attributes); generator.writeObjectFieldStart(nodeId); if (getRandom().nextBoolean()) { @@ -256,18 +268,17 @@ private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme generator.writeFieldName("name"); generator.writeString(node.getName()); - int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3); - Map attributes = new HashMap<>(numAttributes); - for (int j = 0; j < numAttributes; j++) { - attributes.put("attr" + j, "value" + j); - } if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); - } - for (Map.Entry entry : attributes.entrySet()) { - generator.writeStringField(entry.getKey(), entry.getValue()); - } - if (numAttributes > 0) { + for (Map.Entry> entry : attributes.entrySet()) { + if (entry.getValue().size() == 1) { + generator.writeStringField(entry.getKey(), entry.getValue().get(0)); + } else { + for (int v = 0; v < entry.getValue().size(); v++) { + generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v)); + } + } + } generator.writeEndObject(); } generator.writeEndObject(); diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json index b370e78e16011..22dc4ec13ed51 100644 --- a/client/sniffer/src/test/resources/2.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -1,140 +1,200 @@ { - "cluster_name" : "elasticsearch", - "nodes" : { - "qYUZ_8bTRwODPxukDlFw6Q" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9204", - "attributes" : { - "master" : "false" + "cluster_name": "elasticsearch", + "nodes": { + "qr-SOrELSaGW8SlU8nflBw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9200", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "data": "false", + "array.1": "1", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9200", + "[::1]:9200" + ], + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "Yej5UVNgR2KgBjUFHOQpCw" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9207", - "attributes" : { - "data" : "false", - "master" : "false" + "osfiXxUOQzCVIs-eepgSCA": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9201", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ], - "publish_address" : 
"127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9201", + "[::1]:9201" + ], + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "mHttJwhwReangKEx9EGuAg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9201", - "attributes" : { - "data" : "false", - "master" : "true" + "lazeJFiIQ8eHHV4GeIdMPg": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9202", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "data": "false", + "array.1": "3", + "master": "true" }, - "http" : { - "bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9202", + "[::1]:9202" + ], + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "6Erdptt_QRGLxMiLi9mTkg" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9206", - "attributes" : { - "data" : "false", - "client" : "true" + "t9WxK-fNRsqV5G0Mm09KpQ": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9203", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9203", + "[::1]:9203" + ], + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "mLRCZBypTiys6e8KY5DMnA" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9200", - "attributes" : { - "data" : "false" + "wgoDzluvTViwUjEsmVesKw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9204", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9204", + "[::1]:9204" + ], + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "pVqOhytXQwetsZVzCBppYw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9202", - "http" : { - "bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "6j_t3pPhSm-oRTyypTzu5g": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + 
"http_address": "127.0.0.1:9205", + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3", + "master": "false" + }, + "http": { + "bound_address": [ + "127.0.0.1:9205", + "[::1]:9205" + ], + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "ARyzVfpJSw2a9TOIUpbsBA" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9205", - "attributes" : { - "master" : "false" + "PaEkm0z7Ssiuyfkh3aASag": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9206", + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "data": "false", + "array.1": "1", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9206", + "[::1]:9206" + ], + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "2Hpid-g5Sc2BKCevhN6VQw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "2.0.0", - "build" : "de54438", - "http_address" : "127.0.0.1:9203", - "attributes" : { - "master" : "false" + "LAFKr2K_QmupqnM_atJqkQ": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "2.0.0", + "build": "de54438", + "http_address": "127.0.0.1:9207", + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "data": "false", + "array.1": "2", + "master": "false" }, - "http" : { - "bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "http": { + "bound_address": [ + "127.0.0.1:9207", + "[::1]:9207" + ], + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json index 7a7d143ecaf43..1358438237fc8 100644 --- a/client/sniffer/src/test/resources/5.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/5.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "DXz_rhcdSF2xJ96qyjaLVw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "0S4r3NurTYSFSb8R9SxwWA": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9200", "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "53Mi6jYdRgeR1cdyuoNfQQ" : { - "name" : "m2", - 
"transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "k_CBrMXARkS57Qb5-3Mw5g": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "master", "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ "[::1]:9201", "127.0.0.1:9201" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "XBIghcHiRlWP9c4vY6rETw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "6eynRPQ1RleJTeGDuTR9mw": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ + "master", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "cFM30FlyS8K1njH_bovwwQ" : { - "name" : "d1", - "transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "cbGC-ay1QNWaESvEh5513w": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ "[::1]:9203", "127.0.0.1:9203" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "eoVUVRGNRDyyOapqIcrsIA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "LexndPpXR2ytYsU5fTElnQ": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "data", "ingest" ], - "http" : { - "bound_address" : [ + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ "[::1]:9204", "127.0.0.1:9204" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "xPN76uDcTP-DyXaRzPg2NQ" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ + "SbNG1DKYSBu20zfOz2gDZQ": { + "name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + 
"roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "RY0oW2d7TISEqazk-U4Kcw" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "data", + "fM4H-m2WTDWmsGsL7jIJew": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "tU0rXEZmQ9GsWfn2TQ4kow" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "5.0.0", - "build_hash" : "253032b", - "roles" : [ - "master", + "pFoh7d0BTbqqI3HKd9na5A": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "5.0.0", + "build_hash": "253032b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9202", - "127.0.0.1:9202" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json index 5a8905da64c89..f0535dfdfb00f 100644 --- a/client/sniffer/src/test/resources/6.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/6.0.0_nodes_http.json @@ -1,168 +1,216 @@ { - "_nodes" : { - "total" : 8, - "successful" : 8, - "failed" : 0 + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 }, - "cluster_name" : "test", - "nodes" : { - "FX9npqGQSL2mOGF8Zkf3hw" : { - "name" : "m2", - "transport_address" : "127.0.0.1:9301", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "cluster_name": "elasticsearch", + "nodes": { + "ikXK_skVTfWkhONhldnbkw": { + "name": "m1", + "transport_address": "127.0.0.1:9300", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "master", - "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9201", - "127.0.0.1:9201" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "m", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9200", + "127.0.0.1:9200" ], - "publish_address" : "127.0.0.1:9201", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9200", + "max_content_length_in_bytes": 104857600 } }, - "jmUqzYLGTbWCg127kve3Tg" : { - "name" : "d1", - 
"transport_address" : "127.0.0.1:9303", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "TMHa34w4RqeuYoHCfJGXZg": { + "name": "m2", + "transport_address": "127.0.0.1:9301", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9203", - "127.0.0.1:9203" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "m", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9201", + "127.0.0.1:9201" ], - "publish_address" : "127.0.0.1:9203", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9201", + "max_content_length_in_bytes": 104857600 } }, - "soBU6bzvTOqdLxPstSbJ2g" : { - "name" : "d3", - "transport_address" : "127.0.0.1:9305", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", + "lzaMRJTVT166sgVZdQ5thA": { + "name": "m3", + "transport_address": "127.0.0.1:9302", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "master", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9205", - "127.0.0.1:9205" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "m", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9202", + "127.0.0.1:9202" ], - "publish_address" : "127.0.0.1:9205", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9202", + "max_content_length_in_bytes": 104857600 } }, - "mtYDAhURTP6twdmNAkMnOg" : { - "name" : "m3", - "transport_address" : "127.0.0.1:9302", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", + "tGP5sUecSd6BLTWk1NWF8Q": { + "name": "d1", + "transport_address": "127.0.0.1:9303", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9202", - "127.0.0.1:9202" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "d", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9203", + "127.0.0.1:9203" ], - "publish_address" : "127.0.0.1:9202", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9203", + "max_content_length_in_bytes": 104857600 } }, - "URxHiUQPROOt1G22Ev6lXw" : { - "name" : "c2", - "transport_address" : "127.0.0.1:9307", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "c1UgW5ROTkSa2YnM_T56tw": { + "name": "d2", + "transport_address": "127.0.0.1:9304", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9207", - "127.0.0.1:9207" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "d", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9204", + "127.0.0.1:9204" ], - "publish_address" : "127.0.0.1:9207", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9204", + "max_content_length_in_bytes": 104857600 } }, - "_06S_kWoRqqFR8Z8CS3JRw" : { - "name" : "c1", - "transport_address" : "127.0.0.1:9306", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ + "QM9yjqjmS72MstpNYV_trg": { + 
"name": "d3", + "transport_address": "127.0.0.1:9305", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ + "data", "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9206", - "127.0.0.1:9206" + "attributes": { + "dummy": "everyone_has_me", + "number": "3", + "array.0": "d", + "array.1": "3" + }, + "http": { + "bound_address": [ + "[::1]:9205", + "127.0.0.1:9205" ], - "publish_address" : "127.0.0.1:9206", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9205", + "max_content_length_in_bytes": 104857600 } }, - "QZE5Bd6DQJmnfVs2dglOvA" : { - "name" : "d2", - "transport_address" : "127.0.0.1:9304", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "data", + "wLtzAssoQYeX_4TstgCj0Q": { + "name": "c1", + "transport_address": "127.0.0.1:9306", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9204", - "127.0.0.1:9204" + "attributes": { + "dummy": "everyone_has_me", + "number": "1", + "array.0": "c", + "array.1": "1" + }, + "http": { + "bound_address": [ + "[::1]:9206", + "127.0.0.1:9206" ], - "publish_address" : "127.0.0.1:9204", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9206", + "max_content_length_in_bytes": 104857600 } }, - "_3mTXg6dSweZn5ReB2fQqw" : { - "name" : "m1", - "transport_address" : "127.0.0.1:9300", - "host" : "127.0.0.1", - "ip" : "127.0.0.1", - "version" : "6.0.0", - "build_hash" : "8f0685b", - "roles" : [ - "master", + "ONOzpst8TH-ZebG7fxGwaA": { + "name": "c2", + "transport_address": "127.0.0.1:9307", + "host": "127.0.0.1", + "ip": "127.0.0.1", + "version": "6.0.0", + "build_hash": "8f0685b", + "roles": [ "ingest" ], - "http" : { - "bound_address" : [ - "[::1]:9200", - "127.0.0.1:9200" + "attributes": { + "dummy": "everyone_has_me", + "number": "2", + "array.0": "c", + "array.1": "2" + }, + "http": { + "bound_address": [ + "[::1]:9207", + "127.0.0.1:9207" ], - "publish_address" : "127.0.0.1:9200", - "max_content_length_in_bytes" : 104857600 + "publish_address": "127.0.0.1:9207", + "max_content_length_in_bytes": 104857600 } } } diff --git a/client/sniffer/src/test/resources/create_test_nodes_info.bash b/client/sniffer/src/test/resources/create_test_nodes_info.bash new file mode 100644 index 0000000000000..f4f1c09882ea8 --- /dev/null +++ b/client/sniffer/src/test/resources/create_test_nodes_info.bash @@ -0,0 +1,107 @@ +#!/bin/bash + +# Recreates the v_nodes_http.json files in this directory. This is +# meant to be an "every once in a while" thing that we do only when +# we want to add a new version of Elasticsearch or configure the +# nodes differently. That is why we don't do this in gradle. It also +# allows us to play fast and loose with error handling. If something +# goes wrong you have to manually clean up which is good because it +# leaves around the kinds of things that we need to debug the failure. + +# I built this file so the next time I have to regenerate these +# v_nodes_http.json files I won't have to reconfigure Elasticsearch +# from scratch. While I was at it I took the time to make sure that +# when we do rebuild the files they don't jump around too much. That +# way the diffs are smaller. 
+ +set -e + +script_path="$( cd "$(dirname "$0")" ; pwd -P )" +work=$(mktemp -d) +pushd ${work} >> /dev/null +echo Working in ${work} + +wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz +sha1sum -c - << __SHAs +e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz +d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz +__SHAs +sha512sum -c - << __SHAs +25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +__SHAs + + +function do_version() { + local version=$1 + local nodes='m1 m2 m3 d1 d2 d3 c1 c2' + rm -rf ${version} + mkdir -p ${version} + pushd ${version} >> /dev/null + + tar xf ../elasticsearch-${version}.tar.gz + local http_port=9200 + for node in ${nodes}; do + mkdir ${node} + cp -r elasticsearch-${version}/* ${node} + local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) + local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) + # m2 is always master and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) + local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local transport_port=$((http_port+100)) + + cat >> ${node}/config/elasticsearch.yml << __ES_YML +node.name: ${node} +node.master: ${master} +node.data: ${data} +node${attr}.dummy: everyone_has_me +node${attr}.number: ${node:1} +node${attr}.array: [${node:0:1}, ${node:1}] +http.port: ${http_port} +transport.tcp.port: ${transport_port} +discovery.zen.minimum_master_nodes: 3 +discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__ES_YML + + if [ ${version} != '2.0.0' ]; then + perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options + fi + + echo "starting ${version}/${node}..." + ${node}/bin/elasticsearch -d -p ${node}/pidfile + + ((http_port++)) + done + + echo "waiting for cluster to form" + # got to wait for all the nodes + until curl -s localhost:9200; do + sleep .25 + done + + echo "waiting for all nodes to join" + until [ $(echo ${nodes} | wc -w) -eq $(curl -s localhost:9200/_cat/nodes | wc -l) ]; do + sleep .25 + done + + # jq sorts the nodes by their http host so the file doesn't jump around when we regenerate it + curl -s localhost:9200/_nodes/http?pretty \ + | jq '[to_entries[] | ( select(.key == "nodes").value|to_entries|sort_by(.value.http.publish_address)|from_entries|{"key": "nodes", "value": .} ) // .] | from_entries' \ + > ${script_path}/${version}_nodes_http.json + + for node in ${nodes}; do + echo "stopping ${version}/${node}..." + kill $(cat ${node}/pidfile) + done + + popd >> /dev/null +} + +JAVA_HOME=$JAVA8_HOME do_version 2.0.0 +JAVA_HOME=$JAVA8_HOME do_version 5.0.0 +JAVA_HOME=$JAVA8_HOME do_version 6.0.0 + +popd >> /dev/null +rm -rf ${work} diff --git a/client/sniffer/src/test/resources/readme.txt b/client/sniffer/src/test/resources/readme.txt index ccb9bb15edb55..c6dd32a0410a5 100644 --- a/client/sniffer/src/test/resources/readme.txt +++ b/client/sniffer/src/test/resources/readme.txt @@ -2,3 +2,5 @@ few nodes in different configurations locally at various versions. They are for testing `ElasticsearchNodesSniffer` against different versions of Elasticsearch. 
+ +See create_test_nodes_info.bash for how to create these. diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index c1097b68b898f..71606c2c027a5 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -228,6 +228,8 @@ subprojects { check.dependsOn checkNotice if (project.name == 'zip' || project.name == 'tar') { + project.ext.licenseName = 'Elastic License' + project.ext.licenseUrl = ext.elasticLicenseUrl task checkMlCppNotice { dependsOn buildDist, checkExtraction onlyIf toolExists diff --git a/distribution/packages/src/deb/init.d/elasticsearch b/distribution/packages/src/deb/init.d/elasticsearch index 6d3efd99ca6fe..21ac80a9c22aa 100755 --- a/distribution/packages/src/deb/init.d/elasticsearch +++ b/distribution/packages/src/deb/init.d/elasticsearch @@ -122,7 +122,7 @@ case "$1" in ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/distribution/packages/src/rpm/init.d/elasticsearch b/distribution/packages/src/rpm/init.d/elasticsearch index 01dc4e691c07d..d0fb4f759d184 100644 --- a/distribution/packages/src/rpm/init.d/elasticsearch +++ b/distribution/packages/src/rpm/init.d/elasticsearch @@ -90,7 +90,7 @@ start() { if [ -n "$MAX_LOCKED_MEMORY" ]; then ulimit -l $MAX_LOCKED_MEMORY fi - if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then + if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count -a "$MAX_MAP_COUNT" -ge $(cat /proc/sys/vm/max_map_count) ]; then sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT fi diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 407947000de35..1f8b302715f42 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -312,9 +312,17 @@ adds an extra header: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize] +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-header] -------------------------------------------------- +Or you can send requests to nodes with a particular attribute: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize-attribute] +-------------------------------------------------- +<1> Replace the node selector with one that selects nodes on a particular rack. + ==== Multiple parallel asynchronous actions diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index bff64ebdc9186..6701d53c24047 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -284,22 +284,7 @@ You may further restrict the permissions by specifying a prefix within the bucke // NOTCONSOLE The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository -registration will fail. 
If you want Elasticsearch to create the bucket instead, you can add the permission to create a -specific bucket like this: - -[source,js] ----- -{ - "Action": [ - "s3:CreateBucket" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::snaps.example.com" - ] -} ----- -// NOTCONSOLE +registration will fail. [[repository-s3-aws-vpc]] [float] diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 447873a595bbe..b155cfef302fb 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -244,6 +244,94 @@ GET /alias2/_search?q=user:kimchy&routing=2,3 // CONSOLE // TEST[continued] +[float] +[[aliases-write-index]] +==== Write Index + +It is possible to associate the index pointed to by an alias as the write index. +When specified, all index and update requests against an alias that point to multiple +indices will attempt to resolve to the one index that is the write index. +Only one index per alias can be assigned to be the write index at a time. If no write index is specified +and there are multiple indices referenced by an alias, then writes will not be allowed. + +It is possible to specify an index associated with an alias as a write index using both the aliases API +and index creation API. + +[source,js] +-------------------------------------------------- +POST /_aliases +{ + "actions" : [ + { + "add" : { + "index" : "test", + "alias" : "alias1", + "is_write_index" : true + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT test\n/] + +In this example, we associate the alias `alias1` to both `test` and `test2`, where +`test` will be the index chosen for writing to. + +[source,js] +-------------------------------------------------- +PUT /alias1/_doc/1 +{ + "foo": "bar" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The new document that was indexed to `/alias1/_doc/1` will be indexed as if it were +`/test/_doc/1`. + +[source,js] +-------------------------------------------------- +GET /test/_doc/1 +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +To swap which index is the write index for an alias, the Aliases API can be leveraged to +do an atomic swap. The swap is not dependent on the ordering of the actions. + +[source,js] +-------------------------------------------------- +POST /_aliases +{ + "actions" : [ + { + "add" : { + "index" : "test", + "alias" : "alias1", + "is_write_index" : true + } + }, { + "add" : { + "index" : "test2", + "alias" : "alias1", + "is_write_index" : false + } + } + ] +} +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT test\nPUT test2\n/] + +[IMPORTANT] +===================================== +Aliases that do not explicitly set `is_write_index: true` for an index, and +only reference one index, will have that referenced index behave as if it is the write index +until an additional index is referenced. At that point, there will be no write index and +writes will be rejected. +===================================== [float] [[alias-adding]] diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 15db4b7a94a85..aa74068419df0 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -17,6 +17,9 @@ As a general rule: * Migration between non-consecutive major versions -- e.g. `5.x` to `7.x` -- is not supported. -See <> for more info. 
+For more information, see <>. + +See also <> and <>. + -- include::migrate_7_0.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index c80b0ae64a371..42fd6b7afbe73 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -4,6 +4,8 @@ This section discusses the changes that you need to be aware of when migrating your application to Elasticsearch 7.0. +See also <> and <>. + [float] === Indices created before 7.0 diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 0e62fa207f6eb..b18f7c57a1668 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -279,6 +279,13 @@ docker build --tag=elasticsearch-custom . docker run -ti -v /usr/share/elasticsearch/data elasticsearch-custom -------------------------------------------- +Some plugins require additional security permissions. You have to explicitly accept +them either by attaching a `tty` when you run the Docker image and accepting yes at +the prompts, or inspecting the security permissions separately and if you are +comfortable with them adding the `--batch` flag to the plugin install command. +See {plugins}/_other_command_line_parameters.html[Plugin Management documentation] +for more details. + ===== D. Override the image's default https://docs.docker.com/engine/reference/run/#cmd-default-command-or-options[CMD] Options can be passed as command-line options to the {es} process by diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java similarity index 92% rename from server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java rename to libs/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java index e0b8aea178c70..a30e7490ff445 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/AbstractRefCounted.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import org.apache.lucene.store.AlreadyClosedException; - import java.util.concurrent.atomic.AtomicInteger; /** @@ -68,7 +66,7 @@ public final void decRef() { } protected void alreadyClosed() { - throw new AlreadyClosedException(name + " is already closed can't increment refCount current count [" + refCount.get() + "]"); + throw new IllegalStateException(name + " is already closed can't increment refCount current count [" + refCount.get() + "]"); } /** diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java similarity index 95% rename from server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java rename to libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java index b2cc8b99c63de..1e7bdc0e78faa 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/common/util/concurrent/RefCounted.java @@ -44,7 +44,7 @@ public interface RefCounted { * * @see #decRef * @see #tryIncRef() - * @throws org.apache.lucene.store.AlreadyClosedException iff the reference counter can not be incremented. 
+ * @throws IllegalStateException iff the reference counter can not be incremented. */ void incRef(); diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java b/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java similarity index 94% rename from server/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java rename to libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java index b2664b134ed8e..ebcf12482dfa7 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.util.concurrent; -import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -70,14 +69,14 @@ public void testRefCount() throws IOException { try { counted.incRef(); fail(" expected exception"); - } catch (AlreadyClosedException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), equalTo("test is already closed can't increment refCount current count [0]")); } try { counted.ensureOpen(); fail(" expected exception"); - } catch (AlreadyClosedException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), equalTo("closed")); } } @@ -116,7 +115,7 @@ public void run() { try { counted.ensureOpen(); fail("expected to be closed"); - } catch (AlreadyClosedException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), equalTo("closed")); } assertThat(counted.refCount(), is(0)); @@ -140,7 +139,7 @@ protected void closeInternal() { public void ensureOpen() { if (closed.get()) { assert this.refCount() == 0; - throw new AlreadyClosedException("closed"); + throw new IllegalStateException("closed"); } } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java b/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java index f671b39d4d61b..7c718237cd20e 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java @@ -19,6 +19,7 @@ package org.elasticsearch.nio; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.nio.utils.ExceptionsHelper; import java.nio.ByteBuffer; @@ -41,6 +42,7 @@ public final class InboundChannelBuffer implements AutoCloseable { private static final int PAGE_MASK = PAGE_SIZE - 1; private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE); private static final ByteBuffer[] EMPTY_BYTE_BUFFER_ARRAY = new ByteBuffer[0]; + private static final Page[] EMPTY_BYTE_PAGE_ARRAY = new Page[0]; private final ArrayDeque pages; @@ -152,6 +154,46 @@ public ByteBuffer[] sliceBuffersTo(long to) { return buffers; } + /** + * This method will return an array of {@link Page} representing the bytes from the beginning of + * this buffer up through the index argument that was passed. The pages and buffers will be duplicates of + * the internal components, so any modifications to the markers {@link ByteBuffer#position()}, + * {@link ByteBuffer#limit()}, etc will not modify the this class. Additionally, this will internally + * retain the underlying pages, so the pages returned by this method must be closed. 
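+     * A hypothetical caller might do {@code Page[] pages = buffer.sliceAndRetainPagesTo(n);},
+     * read from the returned buffers, and then call {@code close()} on each returned page once done.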
+ * + * @param to the index to slice up to + * @return the pages + */ + public Page[] sliceAndRetainPagesTo(long to) { + if (to > capacity) { + throw new IndexOutOfBoundsException("can't slice a channel buffer with capacity [" + capacity + + "], with slice parameters to [" + to + "]"); + } else if (to == 0) { + return EMPTY_BYTE_PAGE_ARRAY; + } + long indexWithOffset = to + offset; + int pageCount = pageIndex(indexWithOffset); + int finalLimit = indexInPage(indexWithOffset); + if (finalLimit != 0) { + pageCount += 1; + } + + Page[] pages = new Page[pageCount]; + Iterator pageIterator = this.pages.iterator(); + Page firstPage = pageIterator.next().duplicate(); + ByteBuffer firstBuffer = firstPage.byteBuffer; + firstBuffer.position(firstBuffer.position() + offset); + pages[0] = firstPage; + for (int i = 1; i < pages.length; i++) { + pages[i] = pageIterator.next().duplicate(); + } + if (finalLimit != 0) { + pages[pages.length - 1].byteBuffer.limit(finalLimit); + } + + return pages; + } + /** * This method will return an array of {@link ByteBuffer} representing the bytes from the index passed * through the end of this buffer. The buffers will be duplicates of the internal buffers, so any @@ -231,16 +273,49 @@ private int indexInPage(long index) { public static class Page implements AutoCloseable { private final ByteBuffer byteBuffer; - private final Runnable closeable; + // This is reference counted as some implementations want to retain the byte pages by calling + // sliceAndRetainPagesTo. With reference counting we can increment the reference count, return the + // pages, and safely close them when this channel buffer is done with them. The reference count + // would be 1 at that point, meaning that the pages will remain until the implementation closes + // theirs. 
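+    // For example (hypothetical sequence): a call to sliceAndRetainPagesTo bumps the count to 2;
+    // closing this channel buffer drops it back to 1; the caller's Page.close() then drops it to
+    // 0, which is when closeInternal actually releases the underlying page.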
+ private final RefCountedCloseable refCountedCloseable; public Page(ByteBuffer byteBuffer, Runnable closeable) { + this(byteBuffer, new RefCountedCloseable(closeable)); + } + + private Page(ByteBuffer byteBuffer, RefCountedCloseable refCountedCloseable) { this.byteBuffer = byteBuffer; - this.closeable = closeable; + this.refCountedCloseable = refCountedCloseable; + } + + private Page duplicate() { + refCountedCloseable.incRef(); + return new Page(byteBuffer.duplicate(), refCountedCloseable); + } + + public ByteBuffer getByteBuffer() { + return byteBuffer; } @Override public void close() { - closeable.run(); + refCountedCloseable.decRef(); + } + + private static class RefCountedCloseable extends AbstractRefCounted { + + private final Runnable closeable; + + private RefCountedCloseable(Runnable closeable) { + super("byte array page"); + this.closeable = closeable; + } + + @Override + protected void closeInternal() { + closeable.run(); + } } } } diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java index 199a509cbfabb..8dd72e869e8d9 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java @@ -31,7 +31,8 @@ public class InboundChannelBufferTests extends ESTestCase { private static final int PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES; private final Supplier defaultPageSupplier = () -> - new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); + new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> { + }); public void testNewBufferHasSinglePage() { InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); @@ -167,6 +168,49 @@ public void testClose() { expectThrows(IllegalStateException.class, () -> channelBuffer.ensureCapacity(1)); } + public void testCloseRetainedPages() { + ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue<>(); + Supplier supplier = () -> { + AtomicBoolean atomicBoolean = new AtomicBoolean(); + queue.add(atomicBoolean); + return new InboundChannelBuffer.Page(ByteBuffer.allocate(PAGE_SIZE), () -> atomicBoolean.set(true)); + }; + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(supplier); + channelBuffer.ensureCapacity(PAGE_SIZE * 4); + + assertEquals(4, queue.size()); + + for (AtomicBoolean closedRef : queue) { + assertFalse(closedRef.get()); + } + + InboundChannelBuffer.Page[] pages = channelBuffer.sliceAndRetainPagesTo(PAGE_SIZE * 2); + + pages[1].close(); + + for (AtomicBoolean closedRef : queue) { + assertFalse(closedRef.get()); + } + + channelBuffer.close(); + + int i = 0; + for (AtomicBoolean closedRef : queue) { + if (i < 1) { + assertFalse(closedRef.get()); + } else { + assertTrue(closedRef.get()); + } + ++i; + } + + pages[0].close(); + + for (AtomicBoolean closedRef : queue) { + assertTrue(closedRef.get()); + } + } + public void testAccessByteBuffers() { InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 25e2291d36e8b..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf2cfa0551ebdf08a2cf3079f3c74643bd9dbb76 \ 
No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..2e666a2d566b0 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +a57659a275921d8ab3f7ec580e9bf713ce6143b1 \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index 661af1b6c9137..988a31a24ee27 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -376,7 +376,8 @@ private static MethodHandle lookupReferenceInternal(Definition definition, Looku ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateMethodType + ref.delegateMethodType, + ref.isDelegateInterface ? 1 : 0 ); return callSite.dynamicInvoker().asType(MethodType.methodType(clazz.clazz, captures)); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index f97df128f15e5..75575d6f12568 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.elasticsearch.painless.spi.Whitelist; +import org.objectweb.asm.Opcodes; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; @@ -202,16 +203,28 @@ public MethodType getMethodType() { public void write(MethodWriter writer) { final org.objectweb.asm.Type type; + final Class clazz; if (augmentation != null) { assert java.lang.reflect.Modifier.isStatic(modifiers); + clazz = augmentation; type = org.objectweb.asm.Type.getType(augmentation); } else { + clazz = owner.clazz; type = owner.type; } if (java.lang.reflect.Modifier.isStatic(modifiers)) { - writer.invokeStatic(type, method); - } else if (java.lang.reflect.Modifier.isInterface(owner.clazz.getModifiers())) { + // invokeStatic assumes that the owner class is not an interface, so this is a + // special case for interfaces where the interface method boolean needs to be set to + // true to reference the appropriate class constant when calling a static interface + // method since java 8 did not check, but java 9 and 10 do + if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) { + writer.visitMethodInsn(Opcodes.INVOKESTATIC, + type.getInternalName(), name, getMethodType().toMethodDescriptorString(), true); + } else { + writer.invokeStatic(type, method); + } + } else if (java.lang.reflect.Modifier.isInterface(clazz.getModifiers())) { writer.invokeInterface(type, method); } else { writer.invokeVirtual(type, method); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 66cf78e857220..0b698dd244192 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -66,6 +66,9 @@ public class FunctionRef { /** delegate method type method as type */ public final Type delegateType; + /** 
whether a call is made on a delegate interface */ + public final boolean isDelegateInterface; + /** * Creates a new FunctionRef, which will resolve {@code type::call} from the whitelist. * @param definition the whitelist against which this script is being compiled @@ -97,10 +100,13 @@ public FunctionRef(Class expected, Method interfaceMethod, Method delegateMet // the Painless$Script class can be inferred if owner is null if (delegateMethod.owner == null) { delegateClassName = CLASS_NAME; + isDelegateInterface = false; } else if (delegateMethod.augmentation != null) { delegateClassName = delegateMethod.augmentation.getName(); + isDelegateInterface = delegateMethod.augmentation.isInterface(); } else { delegateClassName = delegateMethod.owner.clazz.getName(); + isDelegateInterface = delegateMethod.owner.clazz.isInterface(); } if ("".equals(delegateMethod.name)) { @@ -139,6 +145,7 @@ public FunctionRef(Class expected, delegateInvokeType = H_INVOKESTATIC; this.delegateMethodName = delegateMethodName; this.delegateMethodType = delegateMethodType.dropParameterTypes(0, numCaptures); + isDelegateInterface = false; this.interfaceMethod = null; delegateMethod = null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java index 7a2ec9da34e29..3fc8554b271e2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java @@ -188,6 +188,10 @@ private Capture(int count, Class type) { * @param delegateMethodName The name of the method to be called in the Painless script class * @param delegateMethodType The type of method call in the Painless script class without * the captured types + * @param isDelegateInterface If the method to be called is owned by an interface where + * if the value is '1' if the delegate is an interface and '0' + * otherwise; note this is an int because the bootstrap method + * cannot convert constants to boolean * @return A {@link CallSite} linked to a factory method for creating a lambda class * that implements the expected functional interface * @throws LambdaConversionException Thrown when an illegal type conversion occurs at link time @@ -200,7 +204,8 @@ public static CallSite lambdaBootstrap( String delegateClassName, int delegateInvokeType, String delegateMethodName, - MethodType delegateMethodType) + MethodType delegateMethodType, + int isDelegateInterface) throws LambdaConversionException { Loader loader = (Loader)lookup.lookupClass().getClassLoader(); String lambdaClassName = Type.getInternalName(lookup.lookupClass()) + "$$Lambda" + loader.newLambdaIdentifier(); @@ -225,7 +230,7 @@ public static CallSite lambdaBootstrap( generateInterfaceMethod(cw, factoryMethodType, lambdaClassType, interfaceMethodName, interfaceMethodType, delegateClassType, delegateInvokeType, - delegateMethodName, delegateMethodType, captures); + delegateMethodName, delegateMethodType, isDelegateInterface == 1, captures); endLambdaClass(cw); @@ -369,6 +374,7 @@ private static void generateInterfaceMethod( int delegateInvokeType, String delegateMethodName, MethodType delegateMethodType, + boolean isDelegateInterface, Capture[] captures) throws LambdaConversionException { @@ -434,7 +440,7 @@ private static void generateInterfaceMethod( Handle delegateHandle = new Handle(delegateInvokeType, delegateClassType.getInternalName(), delegateMethodName, 
delegateMethodType.toMethodDescriptorString(), - delegateInvokeType == H_INVOKEINTERFACE); + isDelegateInterface); iface.invokeDynamic(delegateMethodName, Type.getMethodType(interfaceMethodType .toMethodDescriptorString()).getDescriptor(), DELEGATE_BOOTSTRAP_HANDLE, delegateHandle); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index 9150e2609b700..18d7d94492e67 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -141,8 +141,8 @@ public final class WriterConstants { /** invokedynamic bootstrap for lambda expression/method references */ public static final MethodType LAMBDA_BOOTSTRAP_TYPE = - MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, - MethodType.class, MethodType.class, String.class, int.class, String.class, MethodType.class); + MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class, + MethodType.class, String.class, int.class, String.class, MethodType.class, int.class); public static final Handle LAMBDA_BOOTSTRAP_HANDLE = new Handle(Opcodes.H_INVOKESTATIC, Type.getInternalName(LambdaBootstrap.class), "lambdaBootstrap", LAMBDA_BOOTSTRAP_TYPE.toMethodDescriptorString(), false); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java index 724679d3f8538..e6f2f7ebf91f9 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECapturingFunctionRef.java @@ -121,7 +121,8 @@ void write(MethodWriter writer, Globals globals) { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 1 : 0 ); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java index 636623004c982..c82b1003a55f1 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java @@ -112,7 +112,8 @@ void write(MethodWriter writer, Globals globals) { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 1 : 0 ); } else { // TODO: don't do this: its just to cutover :) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java index c37ff435f566f..a7213e75ca485 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java @@ -222,7 +222,8 @@ void write(MethodWriter writer, Globals globals) { ref.delegateClassName, ref.delegateInvokeType, ref.delegateMethodName, - ref.delegateType + ref.delegateType, + ref.isDelegateInterface ? 
1 : 0 ); } else { // placeholder diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java index 97e1f01fdfc94..6ff727d987cdd 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BasicExpressionTests.java @@ -264,6 +264,11 @@ public void testNullSafeDeref() { // assertEquals(null, exec("def a = ['thing': 'bar']; a.other?.cat?.dog = 'wombat'; return a.other?.cat?.dog")); } + // test to ensure static interface methods are called correctly + public void testStaticInterfaceMethod() { + assertEquals(4, exec("def values = [1, 4, 3, 2]; values.sort(Comparator.comparing(p -> p)); return values[3]")); + } + private void assertMustBeNullable(String script) { Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> exec(script)); assertEquals("Result of null safe operator must be nullable", e.getMessage()); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java index 7c49d042108ef..fd47db6b83d41 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java @@ -184,6 +184,11 @@ public void testInterfaceDefaultMethodDef() { "def map = new HashMap(); f(map::getOrDefault)")); } + public void testInterfaceStaticMethod() { + assertEquals(-1, exec("Supplier get(Supplier supplier) { return supplier }" + + "Supplier s = get(Comparator::naturalOrder); s.get().compare(1, 2)")); + } + public void testMethodMissing() { Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { exec("List l = [2, 1]; l.sort(Integer::bogus); return l.get(0);"); diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index f0479f6e4abcc..0692fda502560 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -23,3 +23,12 @@ esplugin { hasClientJar = true } +integTestCluster { + // Modules who's integration is explicitly tested in integration tests + module project(':modules:lang-mustache') +} + +run { + // Modules who's integration is explicitly tested in integration tests + module project(':modules:lang-mustache') +} diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index 13926d7d362ff..01a6e35299b29 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -36,6 +36,7 @@ import java.util.Optional; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.index.rankeval.EvaluationMetric.joinHitsWithRatings; @@ -129,26 +130,31 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, .collect(Collectors.toList()); List ratedHits = joinHitsWithRatings(hits, ratedDocs); List ratingsInSearchHits = new ArrayList<>(ratedHits.size()); + int 
unratedResults = 0; for (RatedSearchHit hit : ratedHits) { - // unknownDocRating might be null, which means it will be unrated docs are - // ignored in the dcg calculation - // we still need to add them as a placeholder so the rank of the subsequent - // ratings is correct + // unknownDocRating might be null, in which case unrated docs will be ignored in the dcg calculation. + // we still need to add them as a placeholder so the rank of the subsequent ratings is correct ratingsInSearchHits.add(hit.getRating().orElse(unknownDocRating)); + if (hit.getRating().isPresent() == false) { + unratedResults++; + } } - double dcg = computeDCG(ratingsInSearchHits); + final double dcg = computeDCG(ratingsInSearchHits); + double result = dcg; + double idcg = 0; if (normalize) { Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder())); - double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); - if (idcg > 0) { - dcg = dcg / idcg; + idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); + if (idcg != 0) { + result = dcg / idcg; } else { - dcg = 0; + result = 0; } } - EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg); + EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, result); evalQueryQuality.addHitsAndRatings(ratedHits); + evalQueryQuality.setMetricDetails(new Detail(dcg, idcg, unratedResults)); return evalQueryQuality; } @@ -167,7 +173,7 @@ private static double computeDCG(List ratings) { private static final ParseField K_FIELD = new ParseField("k"); private static final ParseField NORMALIZE_FIELD = new ParseField("normalize"); private static final ParseField UNKNOWN_DOC_RATING_FIELD = new ParseField("unknown_doc_rating"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg_at", false, + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("dcg", false, args -> { Boolean normalized = (Boolean) args[0]; Integer optK = (Integer) args[2]; @@ -217,4 +223,118 @@ public final boolean equals(Object obj) { public final int hashCode() { return Objects.hash(normalize, unknownDocRating, k); } + + public static final class Detail implements MetricDetail { + + private static ParseField DCG_FIELD = new ParseField("dcg"); + private static ParseField IDCG_FIELD = new ParseField("ideal_dcg"); + private static ParseField NDCG_FIELD = new ParseField("normalized_dcg"); + private static ParseField UNRATED_FIELD = new ParseField("unrated_docs"); + private final double dcg; + private final double idcg; + private final int unratedDocs; + + Detail(double dcg, double idcg, int unratedDocs) { + this.dcg = dcg; + this.idcg = idcg; + this.unratedDocs = unratedDocs; + } + + Detail(StreamInput in) throws IOException { + this.dcg = in.readDouble(); + this.idcg = in.readDouble(); + this.unratedDocs = in.readVInt(); + } + + @Override + public + String getMetricName() { + return NAME; + } + + @Override + public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(DCG_FIELD.getPreferredName(), this.dcg); + if (this.idcg != 0) { + builder.field(IDCG_FIELD.getPreferredName(), this.idcg); + builder.field(NDCG_FIELD.getPreferredName(), this.dcg / this.idcg); + } + builder.field(UNRATED_FIELD.getPreferredName(), this.unratedDocs); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + 
return new Detail((Double) args[0], (Double) args[1] != null ? (Double) args[1] : 0.0d, (Integer) args[2]); + }); + + static { + PARSER.declareDouble(constructorArg(), DCG_FIELD); + PARSER.declareDouble(optionalConstructorArg(), IDCG_FIELD); + PARSER.declareInt(constructorArg(), UNRATED_FIELD); + } + + public static Detail fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(this.dcg); + out.writeDouble(this.idcg); + out.writeVInt(this.unratedDocs); + } + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * @return the discounted cumulative gain + */ + public double getDCG() { + return this.dcg; + } + + /** + * @return the ideal discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required + */ + public double getIDCG() { + return this.idcg; + } + + /** + * @return the normalized discounted cumulative gain, can be 0 if nothing was computed, e.g. because no normalization was required + */ + public double getNDCG() { + return (this.idcg != 0) ? this.dcg / this.idcg : 0; + } + + /** + * @return the number of unrated documents in the search results + */ + public Object getUnratedDocs() { + return this.unratedDocs; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + DiscountedCumulativeGain.Detail other = (DiscountedCumulativeGain.Detail) obj; + return (this.dcg == other.dcg && + this.idcg == other.idcg && + this.unratedDocs == other.unratedDocs); + } + + @Override + public int hashCode() { + return Objects.hash(this.dcg, this.idcg, this.unratedDocs); + } + } } + diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java index c5785ca3847d4..f2176113cdf9d 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalNamedXContentProvider.java @@ -41,6 +41,8 @@ public List getNamedXContentParsers() { PrecisionAtK.Detail::fromXContent)); namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(MeanReciprocalRank.NAME), MeanReciprocalRank.Detail::fromXContent)); + namedXContent.add(new NamedXContentRegistry.Entry(MetricDetail.class, new ParseField(DiscountedCumulativeGain.NAME), + DiscountedCumulativeGain.Detail::fromXContent)); return namedXContent; } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java index 884cf3bafdcda..8ac2b7fbee528 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalPlugin.java @@ -61,8 +61,9 @@ public List getNamedWriteables() { namedWriteables.add( new NamedWriteableRegistry.Entry(EvaluationMetric.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, PrecisionAtK.NAME, PrecisionAtK.Detail::new)); - namedWriteables - .add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, 
MeanReciprocalRank.Detail::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetricDetail.class, MeanReciprocalRank.NAME, MeanReciprocalRank.Detail::new)); + namedWriteables.add( + new NamedWriteableRegistry.Entry(MetricDetail.class, DiscountedCumulativeGain.NAME, DiscountedCumulativeGain.Detail::new)); return namedWriteables; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 64337786b1eb6..24ac600a11398 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; @@ -254,9 +255,8 @@ private void assertParsedCorrect(String xContent, Integer expectedUnknownDocRati public static DiscountedCumulativeGain createTestItem() { boolean normalize = randomBoolean(); - Integer unknownDocRating = Integer.valueOf(randomIntBetween(0, 1000)); - - return new DiscountedCumulativeGain(normalize, unknownDocRating, 10); + Integer unknownDocRating = frequently() ? Integer.valueOf(randomIntBetween(0, 1000)) : null; + return new DiscountedCumulativeGain(normalize, unknownDocRating, randomIntBetween(1, 10)); } public void testXContentRoundtrip() throws IOException { @@ -283,7 +283,25 @@ public void testXContentParsingIsNotLenient() throws IOException { parser.nextToken(); XContentParseException exception = expectThrows(XContentParseException.class, () -> DiscountedCumulativeGain.fromXContent(parser)); - assertThat(exception.getMessage(), containsString("[dcg_at] unknown field")); + assertThat(exception.getMessage(), containsString("[dcg] unknown field")); + } + } + + public void testMetricDetails() { + double dcg = randomDoubleBetween(0, 1, true); + double idcg = randomBoolean() ? 0.0 : randomDoubleBetween(0, 1, true); + double expectedNdcg = idcg != 0 ? 
dcg / idcg : 0.0; + int unratedDocs = randomIntBetween(0, 100); + DiscountedCumulativeGain.Detail detail = new DiscountedCumulativeGain.Detail(dcg, idcg, unratedDocs); + assertEquals(dcg, detail.getDCG(), 0.0); + assertEquals(idcg, detail.getIDCG(), 0.0); + assertEquals(expectedNdcg, detail.getNDCG(), 0.0); + assertEquals(unratedDocs, detail.getUnratedDocs()); + if (idcg != 0) { + assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"ideal_dcg\":" + idcg + ",\"normalized_dcg\":" + expectedNdcg + + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail)); + } else { + assertEquals("{\"dcg\":{\"dcg\":" + dcg + ",\"unrated_docs\":" + unratedDocs + "}}", Strings.toString(detail)); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java index 112cf4eaaf72e..e9fae6b5c63ee 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java @@ -68,10 +68,20 @@ public static EvalQueryQuality randomEvalQueryQuality() { EvalQueryQuality evalQueryQuality = new EvalQueryQuality(randomAlphaOfLength(10), randomDoubleBetween(0.0, 1.0, true)); if (randomBoolean()) { - if (randomBoolean()) { + int metricDetail = randomIntBetween(0, 2); + switch (metricDetail) { + case 0: evalQueryQuality.setMetricDetails(new PrecisionAtK.Detail(randomIntBetween(0, 1000), randomIntBetween(0, 1000))); - } else { + break; + case 1: evalQueryQuality.setMetricDetails(new MeanReciprocalRank.Detail(randomIntBetween(0, 1000))); + break; + case 2: + evalQueryQuality.setMetricDetails(new DiscountedCumulativeGain.Detail(randomDoubleBetween(0, 1, true), + randomBoolean() ? 
randomDoubleBetween(0, 1, true) : 0, randomInt())); + break; + default: + throw new IllegalArgumentException("illegal randomized value in test"); } } evalQueryQuality.addHitsAndRatings(ratedHits); diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/10_rank_eval_templated.yml b/modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml similarity index 100% rename from qa/smoke-test-rank-eval-with-mustache/src/test/resources/rest-api-spec/test/rank-eval/10_rank_eval_templated.yml rename to modules/rank-eval/src/test/resources/rest-api-spec/test/rank_eval/40_rank_eval_templated.yml diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index cb31d44454452..473985d21091b 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -19,252 +19,58 @@ package org.elasticsearch.http.netty4; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; import io.netty.channel.Channel; -import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.http.cookie.ServerCookieDecoder; -import io.netty.handler.codec.http.cookie.ServerCookieEncoder; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.http.HttpHandlingSettings; -import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.rest.AbstractRestChannel; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpResponse; import org.elasticsearch.transport.netty4.Netty4Utils; -import java.util.Collections; -import java.util.EnumMap; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.net.InetSocketAddress; -final class Netty4HttpChannel extends AbstractRestChannel { +public class Netty4HttpChannel implements HttpChannel { - private final Netty4HttpServerTransport transport; private final Channel channel; - private final FullHttpRequest nettyRequest; - private final int sequence; - private final ThreadContext threadContext; - private final HttpHandlingSettings handlingSettings; - /** - * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. - * @param request The request that is handled by this channel. 
- * @param sequence The pipelining sequence number for this request - * @param handlingSettings true if error messages should include stack traces. - * @param threadContext the thread context for the channel - */ - Netty4HttpChannel(Netty4HttpServerTransport transport, Netty4HttpRequest request, int sequence, HttpHandlingSettings handlingSettings, - ThreadContext threadContext) { - super(request, handlingSettings.getDetailedErrorsEnabled()); - this.transport = transport; - this.channel = request.getChannel(); - this.nettyRequest = request.request(); - this.sequence = sequence; - this.threadContext = threadContext; - this.handlingSettings = handlingSettings; + Netty4HttpChannel(Channel channel) { + this.channel = channel; } @Override - protected BytesStreamOutput newBytesOutput() { - return new ReleasableBytesStreamOutput(transport.bigArrays); - } - - @Override - public void sendResponse(RestResponse response) { - // if the response object was created upstream, then use it; - // otherwise, create a new one - ByteBuf buffer = Netty4Utils.toByteBuf(response.content()); - final FullHttpResponse resp; - if (HttpMethod.HEAD.equals(nettyRequest.method())) { - resp = newResponse(Unpooled.EMPTY_BUFFER); - } else { - resp = newResponse(buffer); - } - resp.setStatus(getStatus(response.status())); - - Netty4CorsHandler.setCorsResponseHeaders(nettyRequest, resp, transport.getCorsConfig()); - - String opaque = nettyRequest.headers().get("X-Opaque-Id"); - if (opaque != null) { - setHeaderField(resp, "X-Opaque-Id", opaque); - } - - // Add all custom headers - addCustomHeaders(resp, response.getHeaders()); - addCustomHeaders(resp, threadContext.getResponseHeaders()); - - BytesReference content = response.content(); - boolean releaseContent = content instanceof Releasable; - boolean releaseBytesStreamOutput = bytesOutputOrNull() instanceof ReleasableBytesStreamOutput; - try { - // If our response doesn't specify a content-type header, set one - setHeaderField(resp, HttpHeaderNames.CONTENT_TYPE.toString(), response.contentType(), false); - // If our response has no content-length, calculate and set one - setHeaderField(resp, HttpHeaderNames.CONTENT_LENGTH.toString(), String.valueOf(buffer.readableBytes()), false); - - addCookies(resp); - - final ChannelPromise promise = channel.newPromise(); - - if (releaseContent) { - promise.addListener(f -> ((Releasable) content).close()); - } - - if (releaseBytesStreamOutput) { - promise.addListener(f -> bytesOutputOrNull().close()); - } - - if (isCloseConnection()) { - promise.addListener(ChannelFutureListener.CLOSE); - } - - Netty4HttpResponse newResponse = new Netty4HttpResponse(sequence, resp); - - channel.writeAndFlush(newResponse, promise); - releaseContent = false; - releaseBytesStreamOutput = false; - } finally { - if (releaseContent) { - ((Releasable) content).close(); - } - if (releaseBytesStreamOutput) { - bytesOutputOrNull().close(); - } - } - } - - private void setHeaderField(HttpResponse resp, String headerField, String value) { - setHeaderField(resp, headerField, value, true); - } - - private void setHeaderField(HttpResponse resp, String headerField, String value, boolean override) { - if (override || !resp.headers().contains(headerField)) { - resp.headers().add(headerField, value); - } - } - - private void addCookies(HttpResponse resp) { - if (handlingSettings.isResetCookies()) { - String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); - if (cookieString != null) { - Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); - if 
(!cookies.isEmpty()) { - // Reset the cookies if necessary. - resp.headers().set(HttpHeaderNames.SET_COOKIE, ServerCookieEncoder.STRICT.encode(cookies)); - } - } - } - } - - private void addCustomHeaders(HttpResponse response, Map> customHeaders) { - if (customHeaders != null) { - for (Map.Entry> headerEntry : customHeaders.entrySet()) { - for (String headerValue : headerEntry.getValue()) { - setHeaderField(response, headerEntry.getKey(), headerValue); + public void sendResponse(HttpResponse response, ActionListener listener) { + ChannelPromise writePromise = channel.newPromise(); + writePromise.addListener(f -> { + if (f.isSuccess()) { + listener.onResponse(null); + } else { + final Throwable cause = f.cause(); + Netty4Utils.maybeDie(cause); + if (cause instanceof Error) { + listener.onFailure(new Exception(cause)); + } else { + listener.onFailure((Exception) cause); } } - } + }); + channel.writeAndFlush(response, writePromise); } - // Determine if the request protocol version is HTTP 1.0 - private boolean isHttp10() { - return nettyRequest.protocolVersion().equals(HttpVersion.HTTP_1_0); - } - - // Determine if the request connection should be closed on completion. - private boolean isCloseConnection() { - final boolean http10 = isHttp10(); - return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || - (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION))); + @Override + public InetSocketAddress getLocalAddress() { + return (InetSocketAddress) channel.localAddress(); } - // Create a new {@link HttpResponse} to transmit the response for the netty request. - private FullHttpResponse newResponse(ByteBuf buffer) { - final boolean http10 = isHttp10(); - final boolean close = isCloseConnection(); - // Build the response object. - final HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize - final FullHttpResponse response; - if (http10) { - response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0, status, buffer); - if (!close) { - response.headers().add(HttpHeaderNames.CONNECTION, "Keep-Alive"); - } - } else { - response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status, buffer); - } - return response; + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) channel.remoteAddress(); } - private static Map MAP; - - static { - EnumMap map = new EnumMap<>(RestStatus.class); - map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); - map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); - map.put(RestStatus.OK, HttpResponseStatus.OK); - map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); - map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); - map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); - map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); - map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); - map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); - map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? 
- map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); - map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); - map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); - map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); - map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); - map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); - map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); - map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); - map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); - map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); - map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); - map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); - map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); - map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); - map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); - map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); - map.put(RestStatus.GONE, HttpResponseStatus.GONE); - map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); - map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); - map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); - map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG); - map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); - map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); - map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); - map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); - map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); - map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); - map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); - map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); - map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); - map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); - MAP = Collections.unmodifiableMap(map); + @Override + public void close() { + channel.close(); } - private static HttpResponseStatus getStatus(RestStatus status) { - return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); + public Channel getNettyChannel() { + return channel; } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 12c2e9a685778..e6436ccea1a93 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -66,7 +66,7 @@ public void write(final ChannelHandlerContext ctx, final Object msg, final Chann try { List> readyResponses = 
aggregator.write(response, promise); for (Tuple readyResponse : readyResponses) { - ctx.write(readyResponse.v1().getResponse(), readyResponse.v2()); + ctx.write(readyResponse.v1(), readyResponse.v2()); } success = true; } catch (IllegalStateException e) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index 2ce6ffada67f0..ffabe5cbbe224 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -19,17 +19,22 @@ package org.elasticsearch.http.netty4; -import io.netty.channel.Channel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpRequest; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.netty4.Netty4Utils; -import java.net.SocketAddress; import java.util.AbstractMap; import java.util.Collection; import java.util.Collections; @@ -38,53 +43,16 @@ import java.util.Set; import java.util.stream.Collectors; -public class Netty4HttpRequest extends RestRequest { - +public class Netty4HttpRequest implements HttpRequest { private final FullHttpRequest request; - private final Channel channel; private final BytesReference content; + private final HttpHeadersMap headers; + private final int sequence; - /** - * Construct a new request. - * - * @param xContentRegistry the content registry - * @param request the underlying request - * @param channel the channel for the request - * @throws BadParameterException if the parameters can not be decoded - * @throws ContentTypeHeaderException if the Content-Type header can not be parsed - */ - Netty4HttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request, Channel channel) { - super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); - this.request = request; - this.channel = channel; - if (request.content().isReadable()) { - this.content = Netty4Utils.toBytesReference(request.content()); - } else { - this.content = BytesArray.EMPTY; - } - } - - /** - * Construct a new request. In contrast to - * {@link Netty4HttpRequest#Netty4HttpRequest(NamedXContentRegistry, Map, String, FullHttpRequest, Channel)}, the URI is not decoded so - * this constructor will not throw a {@link BadParameterException}. 
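The one-line pipelining change above (writing readyResponse.v1() directly, now that Netty4HttpResponse is itself a full HTTP response) rests on the aggregator's ordering contract: handlers may finish in any order, but responses must leave the channel in request order. A minimal sketch of that contract under illustrative names (OrderedResponses is not part of this PR):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    final class OrderedResponses<R> {
        private final PriorityQueue<Entry<R>> queue =
            new PriorityQueue<>(Comparator.comparingInt(e -> e.sequence));
        private int nextToFlush = 0; // the sequence number the wire expects next

        /** Buffers a completed response; returns every response now safe to flush. */
        synchronized List<R> write(int sequence, R response) {
            queue.add(new Entry<>(sequence, response));
            List<R> ready = new ArrayList<>();
            while (queue.isEmpty() == false && queue.peek().sequence == nextToFlush) {
                ready.add(queue.poll().response);
                nextToFlush++;
            }
            return ready;
        }

        private static final class Entry<R> {
            final int sequence;
            final R response;
            Entry(int sequence, R response) {
                this.sequence = sequence;
                this.response = response;
            }
        }
    }

A response whose predecessors are still pending simply stays queued; writing it early would corrupt the pipelined stream.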
- * - * @param xContentRegistry the content registry - * @param params the parameters for the request - * @param uri the path for the request - * @param request the underlying request - * @param channel the channel for the request - * @throws ContentTypeHeaderException if the Content-Type header can not be parsed - */ - Netty4HttpRequest( - final NamedXContentRegistry xContentRegistry, - final Map params, - final String uri, - final FullHttpRequest request, - final Channel channel) { - super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + Netty4HttpRequest(FullHttpRequest request, int sequence) { this.request = request; - this.channel = channel; + headers = new HttpHeadersMap(request.headers()); + this.sequence = sequence; if (request.content().isReadable()) { this.content = Netty4Utils.toBytesReference(request.content()); } else { @@ -92,43 +60,39 @@ public class Netty4HttpRequest extends RestRequest { } } - public FullHttpRequest request() { - return this.request; - } - @Override - public Method method() { + public RestRequest.Method method() { HttpMethod httpMethod = request.method(); if (httpMethod == HttpMethod.GET) - return Method.GET; + return RestRequest.Method.GET; if (httpMethod == HttpMethod.POST) - return Method.POST; + return RestRequest.Method.POST; if (httpMethod == HttpMethod.PUT) - return Method.PUT; + return RestRequest.Method.PUT; if (httpMethod == HttpMethod.DELETE) - return Method.DELETE; + return RestRequest.Method.DELETE; if (httpMethod == HttpMethod.HEAD) { - return Method.HEAD; + return RestRequest.Method.HEAD; } if (httpMethod == HttpMethod.OPTIONS) { - return Method.OPTIONS; + return RestRequest.Method.OPTIONS; } if (httpMethod == HttpMethod.PATCH) { - return Method.PATCH; + return RestRequest.Method.PATCH; } if (httpMethod == HttpMethod.TRACE) { - return Method.TRACE; + return RestRequest.Method.TRACE; } if (httpMethod == HttpMethod.CONNECT) { - return Method.CONNECT; + return RestRequest.Method.CONNECT; } throw new IllegalArgumentException("Unexpected http method: " + httpMethod); @@ -140,39 +104,63 @@ public String uri() { } @Override - public boolean hasContent() { - return content.length() > 0; + public BytesReference content() { + return content; + } + + + @Override + public final Map> getHeaders() { + return headers; } @Override - public BytesReference content() { - return content; + public List strictCookies() { + String cookieString = request.headers().get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + return ServerCookieEncoder.STRICT.encode(cookies); + } + } + return Collections.emptyList(); } - /** - * Returns the remote address where this rest request channel is "connected to". The - * returned {@link SocketAddress} is supposed to be down-cast into more - * concrete type such as {@link java.net.InetSocketAddress} to retrieve - * the detailed information. 
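The new strictCookies() above delegates to Netty's RFC 6265 ("strict") cookie codec: decode the raw Cookie header into cookie objects, then re-encode each one as a canonical name=value string. A self-contained illustration of that round trip:

    import io.netty.handler.codec.http.cookie.Cookie;
    import io.netty.handler.codec.http.cookie.ServerCookieDecoder;
    import io.netty.handler.codec.http.cookie.ServerCookieEncoder;

    import java.util.List;
    import java.util.Set;

    final class CookieRoundTrip {
        public static void main(String[] args) {
            String header = "theme=dark; session=abc123";  // raw Cookie header value
            Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(header);
            List<String> encoded = ServerCookieEncoder.STRICT.encode(cookies);
            encoded.forEach(System.out::println);          // theme=dark, then session=abc123
        }
    }

The STRICT variants reject cookie names and values that fall outside RFC 6265, which is why the accessor is called strictCookies.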
- */ @Override - public SocketAddress getRemoteAddress() { - return channel.remoteAddress(); + public HttpVersion protocolVersion() { + if (request.protocolVersion().equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0)) { + return HttpRequest.HttpVersion.HTTP_1_0; + } else if (request.protocolVersion().equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1)) { + return HttpRequest.HttpVersion.HTTP_1_1; + } else { + throw new IllegalArgumentException("Unexpected http protocol version: " + request.protocolVersion()); + } } - /** - * Returns the local address where this request channel is bound to. The returned - * {@link SocketAddress} is supposed to be down-cast into more concrete - * type such as {@link java.net.InetSocketAddress} to retrieve the detailed - * information. - */ @Override - public SocketAddress getLocalAddress() { - return channel.localAddress(); + public HttpRequest removeHeader(String header) { + HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove(header); + HttpHeaders trailingHeaders = new DefaultHttpHeaders(); + trailingHeaders.add(request.trailingHeaders()); + trailingHeaders.remove(header); + FullHttpRequest requestWithoutHeader = new DefaultFullHttpRequest(request.protocolVersion(), request.method(), request.uri(), + request.content(), headersWithoutContentTypeHeader, trailingHeaders); + return new Netty4HttpRequest(requestWithoutHeader, sequence); + } + + @Override + public Netty4HttpResponse createResponse(RestStatus status, BytesReference content) { + return new Netty4HttpResponse(this, status, content); + } + + public FullHttpRequest nettyRequest() { + return request; } - public Channel getChannel() { - return channel; + int sequence() { + return sequence; } /** @@ -249,7 +237,7 @@ public Collection> values() { @Override public Set>> entrySet() { return httpHeaders.names().stream().map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) - .collect(Collectors.toSet()); + .collect(Collectors.toSet()); } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index c3a010226a408..4547a63a9a278 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -20,112 +20,51 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.Unpooled; -import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.HttpHeaders; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.http.HttpPipelinedRequest; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; -import java.util.Collections; - @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler> { private final Netty4HttpServerTransport serverTransport; 
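removeHeader(...) above never mutates the inbound Netty request: it copies the headers and trailing headers minus the offending name, then wraps the same content buffer in a fresh request, so reference counting on the ByteBuf is untouched. A reduced sketch of that copy-on-remove step (the helper class is illustrative):

    import io.netty.handler.codec.http.DefaultFullHttpRequest;
    import io.netty.handler.codec.http.DefaultHttpHeaders;
    import io.netty.handler.codec.http.FullHttpRequest;
    import io.netty.handler.codec.http.HttpHeaders;

    final class HeaderStripper {
        static FullHttpRequest without(FullHttpRequest request, String name) {
            HttpHeaders headers = new DefaultHttpHeaders();
            headers.add(request.headers());      // copy every header...
            headers.remove(name);                // ...then drop the one being removed
            HttpHeaders trailing = new DefaultHttpHeaders();
            trailing.add(request.trailingHeaders());
            trailing.remove(name);
            // content() is shared, not copied; only the header views are new
            return new DefaultFullHttpRequest(request.protocolVersion(), request.method(),
                    request.uri(), request.content(), headers, trailing);
        }
    }

The PR's version then rewraps the stripped request in a new Netty4HttpRequest carrying the same pipelining sequence.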
- private final HttpHandlingSettings handlingSettings; - private final ThreadContext threadContext; - Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings, - ThreadContext threadContext) { + Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport) { this.serverTransport = serverTransport; - this.handlingSettings = handlingSettings; - this.threadContext = threadContext; } @Override protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest msg) throws Exception { - final FullHttpRequest request = msg.getRequest(); + Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get(); + FullHttpRequest request = msg.getRequest(); try { + final FullHttpRequest copiedRequest = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); - final FullHttpRequest copy = - new DefaultFullHttpRequest( - request.protocolVersion(), - request.method(), - request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - - Exception badRequestCause = null; - - /* - * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there - * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we - * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, - * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the - * underlying exception that caused us to treat the request as bad. - */ - final Netty4HttpRequest httpRequest; - { - Netty4HttpRequest innerHttpRequest; - try { - innerHttpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); - } catch (final RestRequest.ContentTypeHeaderException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutContentTypeHeader(copy, ctx.channel(), badRequestCause); - } catch (final RestRequest.BadParameterException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutParameters(copy, ctx.channel()); - } - httpRequest = innerHttpRequest; - } - - /* - * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid - * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an - * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these - * parameter values. 
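The handler above copies request.content() into an unpooled buffer before building the Netty4HttpRequest: SimpleChannelInboundHandler releases the inbound message once channelRead0 returns, while dispatching may outlive it, and the finally block just below releases the original for the same reason. The ownership hand-off in isolation:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    final class DetachedContent {
        /** Copies a pooled inbound buffer so the original can be released immediately. */
        static ByteBuf detach(ByteBuf pooled) {
            ByteBuf copy = Unpooled.copiedBuffer(pooled); // heap copy, GC-managed
            // the caller is now free to release 'pooled'; 'copy' stays valid
            return copy;
        }
    }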
- */ - final Netty4HttpChannel channel; - { - Netty4HttpChannel innerChannel; - try { - innerChannel = - new Netty4HttpChannel(serverTransport, httpRequest, msg.getSequence(), handlingSettings, threadContext); - } catch (final IllegalArgumentException e) { - if (badRequestCause == null) { - badRequestCause = e; - } else { - badRequestCause.addSuppressed(e); - } - final Netty4HttpRequest innerRequest = - new Netty4HttpRequest( - serverTransport.xContentRegistry, - Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters - copy.uri(), - copy, - ctx.channel()); - innerChannel = - new Netty4HttpChannel(serverTransport, innerRequest, msg.getSequence(), handlingSettings, threadContext); - } - channel = innerChannel; - } + Netty4HttpRequest httpRequest = new Netty4HttpRequest(copiedRequest, msg.getSequence()); if (request.decoderResult().isFailure()) { - serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } else if (badRequestCause != null) { - serverTransport.dispatchBadRequest(httpRequest, channel, badRequestCause); + Throwable cause = request.decoderResult().cause(); + if (cause instanceof Error) { + ExceptionsHelper.dieOnError(cause); + serverTransport.incomingRequestError(httpRequest, channel, new Exception(cause)); + } else { + serverTransport.incomingRequestError(httpRequest, channel, (Exception) cause); + } } else { - serverTransport.dispatchRequest(httpRequest, channel); + serverTransport.incomingRequest(httpRequest, channel); } } finally { // As we have copied the buffer, we can release the request @@ -133,32 +72,6 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("http.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope); - protected final BigArrays bigArrays; + private final ByteSizeValue maxInitialLineLength; + private final ByteSizeValue maxHeaderSize; + private final ByteSizeValue maxChunkSize; - protected final ByteSizeValue maxInitialLineLength; - protected final ByteSizeValue maxHeaderSize; - protected final ByteSizeValue maxChunkSize; + private final int workerCount; - protected final int workerCount; + private final int pipeliningMaxEvents; - protected final int pipeliningMaxEvents; + private final boolean tcpNoDelay; + private final boolean tcpKeepAlive; + private final boolean reuseAddress; - /** - * The registry used to construct parsers so they support {@link XContentParser#namedObject(Class, String, Object)}. 
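The decoder-failure branch above is careful about the Throwable taxonomy: a decode failure is normally an Exception and becomes a dispatched bad request, but an Error must never be quietly downcast or laundered into an HTTP error response, so it is routed through ExceptionsHelper.dieOnError first. A reduced sketch of the same rule (this sketch rethrows where the PR dies):

    final class CauseClassifier {
        /** Returns a dispatchable Exception, refusing to launder Errors. */
        static Exception toDispatchable(Throwable cause) {
            if (cause instanceof Error) {
                throw (Error) cause; // fatal: surface it, do not answer the request
            }
            return (Exception) cause; // safe: decoder failures are Exceptions
        }
    }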
- */ - protected final NamedXContentRegistry xContentRegistry; - - protected final boolean tcpNoDelay; - protected final boolean tcpKeepAlive; - protected final boolean reuseAddress; - - protected final ByteSizeValue tcpSendBufferSize; - protected final ByteSizeValue tcpReceiveBufferSize; - protected final RecvByteBufAllocator recvByteBufAllocator; + private final ByteSizeValue tcpSendBufferSize; + private final ByteSizeValue tcpReceiveBufferSize; + private final RecvByteBufAllocator recvByteBufAllocator; private final int readTimeoutMillis; - protected final int maxCompositeBufferComponents; + private final int maxCompositeBufferComponents; protected volatile ServerBootstrap serverBootstrap; protected final List serverChannels = new ArrayList<>(); - protected final HttpHandlingSettings httpHandlingSettings; - // package private for testing Netty4OpenChannelsHandler serverOpenChannels; @@ -189,16 +179,13 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { - super(settings, networkService, threadPool, dispatcher); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); - this.bigArrays = bigArrays; - this.xContentRegistry = xContentRegistry; this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); - this.httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); @@ -398,26 +385,27 @@ protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throw } public ChannelHandler configureServerChannelHandler() { - return new HttpChannelHandler(this, httpHandlingSettings, threadPool.getThreadContext()); + return new HttpChannelHandler(this, handlingSettings); } + static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("es-http-channel"); + protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final Netty4HttpRequestHandler requestHandler; private final HttpHandlingSettings handlingSettings; - protected HttpChannelHandler( - final Netty4HttpServerTransport transport, - final HttpHandlingSettings handlingSettings, - final ThreadContext threadContext) { + protected HttpChannelHandler(final Netty4HttpServerTransport transport, final HttpHandlingSettings handlingSettings) { this.transport = transport; this.handlingSettings = handlingSettings; - this.requestHandler = new Netty4HttpRequestHandler(transport, handlingSettings, threadContext); + this.requestHandler = new Netty4HttpRequestHandler(transport); } @Override protected void initChannel(Channel ch) throws Exception { + Netty4HttpChannel nettyTcpChannel = new Netty4HttpChannel(ch); + ch.attr(HTTP_CHANNEL_KEY).set(nettyTcpChannel); ch.pipeline().addLast("openChannels", transport.serverOpenChannels); ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); final 
HttpRequestDecoder decoder = new HttpRequestDecoder( diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java index 779eb4fe2e465..38d832d608051 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsHandler.java @@ -22,6 +22,7 @@ import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaders; @@ -30,6 +31,7 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; import org.elasticsearch.common.Strings; +import org.elasticsearch.http.netty4.Netty4HttpResponse; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -76,6 +78,14 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception ctx.fireChannelRead(msg); } + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + assert msg instanceof Netty4HttpResponse : "Invalid message type: " + msg.getClass(); + Netty4HttpResponse response = (Netty4HttpResponse) msg; + setCorsResponseHeaders(response.getRequest().nettyRequest(), response, config); + ctx.write(response, promise); + } + public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, Netty4CorsConfig config) { if (!config.isCorsSupportEnabled()) { return; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index c6655b58bc3bd..70afcc86ad8f9 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -87,8 +87,8 @@ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadP @Override public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index f4818a2e56752..466c4b68bfa4e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -333,10 +333,10 @@ protected void initChannel(Channel ch) throws Exception { addClosedExceptionLogger(ch); NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch, name); ch.attr(CHANNEL_KEY).set(nettyTcpChannel); - serverAcceptedChannel(nettyTcpChannel); ch.pipeline().addLast("logging", new ESLoggingHandler()); ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); ch.pipeline().addLast("dispatcher", new
Netty4MessageChannelHandler(Netty4Transport.this, name)); + serverAcceptedChannel(nettyTcpChannel); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java index f650e757e7a62..89fabdcd763d1 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java @@ -98,8 +98,11 @@ public void sendMessage(BytesReference reference, ActionListener listener) } else { final Throwable cause = f.cause(); Netty4Utils.maybeDie(cause); - assert cause instanceof Exception; - listener.onFailure((Exception) cause); + if (cause instanceof Error) { + listener.onFailure(new Exception(cause)); + } else { + listener.onFailure((Exception) cause); + } } }); channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4CorsTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4CorsTests.java new file mode 100644 index 0000000000000..15a0850f64d38 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4CorsTests.java @@ -0,0 +1,148 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
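The NettyTcpChannel change above applies the same Throwable taxonomy on the write path: a failed ChannelFuture carries an arbitrary Throwable, and the old bare cast to Exception would itself fail with a ClassCastException when the cause was an Error. A sketch of the listener shape, with an illustrative Callback type standing in for ActionListener:

    import io.netty.channel.ChannelFuture;

    interface Callback {
        void onSuccess();
        void onFailure(Exception e);
    }

    final class WriteListeners {
        static void notifyOnCompletion(ChannelFuture future, Callback callback) {
            future.addListener(f -> {
                if (f.isSuccess()) {
                    callback.onSuccess();
                } else {
                    Throwable cause = f.cause();
                    // wrap Errors instead of casting them
                    callback.onFailure(cause instanceof Error ? new Exception(cause) : (Exception) cause);
                }
            });
        }
    }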
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class Netty4CorsTests extends ESTestCase { + + public void testCorsEnabledWithoutAllowOrigins() { + // Set up a HTTP transport with only the CORS enabled setting + Settings settings = Settings.builder() + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, "remote-host", "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); + } + + public void testCorsEnabledWithAllowOrigins() { + final String originValue = "remote-host"; + // create a http transport with CORS enabled and allow origin configured + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testCorsAllowOriginWithSameHost() { + String originValue = "remote-host"; + String host = "remote-host"; + // create a http transport with CORS enabled + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, host); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = "http://" + originValue; + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue + ":5555"; + host = host + ":5555"; + response = executeRequest(settings, 
originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue.replace("http", "https"); + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testThatStringLiteralWorksOnMatch() { + final String originValue = "remote-host"; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); + } + + public void testThatAnyOriginWorks() { + final String originValue = Netty4CorsHandler.ANY_ORIGIN; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); + } + + private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) { + // construct request and send it over the transport layer + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (originValue != null) { + httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); + } + httpRequest.headers().add(HttpHeaderNames.HOST, host); + EmbeddedChannel embeddedChannel = new EmbeddedChannel(); + embeddedChannel.pipeline().addLast(new Netty4CorsHandler(Netty4HttpServerTransport.buildCorsConfig(settings))); + Netty4HttpRequest nettyRequest = new Netty4HttpRequest(httpRequest, 0); + embeddedChannel.writeOutbound(nettyRequest.createResponse(RestStatus.OK, new BytesArray("content"))); + return embeddedChannel.readOutbound(); + } +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java deleted file mode 100644 index 7c5b35a322996..0000000000000 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ /dev/null @@ -1,616 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http.netty4; - -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.Unpooled; -import io.netty.channel.Channel; -import io.netty.channel.ChannelConfig; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelId; -import io.netty.channel.ChannelMetadata; -import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelProgressivePromise; -import io.netty.channel.ChannelPromise; -import io.netty.channel.EventLoop; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.util.Attribute; -import io.netty.util.AttributeKey; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.ByteArray; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.MockPageCacheRecycler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.http.HttpHandlingSettings; -import org.elasticsearch.http.HttpTransportSettings; -import org.elasticsearch.http.NullDispatcher; -import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.netty4.Netty4Utils; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.SocketAddress; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class Netty4HttpChannelTests extends ESTestCase { - - private NetworkService networkService; - private ThreadPool threadPool; - private MockBigArrays bigArrays; - - @Before - public void setup() throws Exception { - networkService = new NetworkService(Collections.emptyList()); - threadPool = new TestThreadPool("test"); - bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - } - - @After - public void shutdown() throws Exception { - if (threadPool != null) { - threadPool.shutdownNow(); - } - } - - public void testResponse() { - final FullHttpResponse response = executeRequest(Settings.EMPTY, "request-host"); - assertThat(response.content(), equalTo(Netty4Utils.toByteBuf(new TestResponse().content()))); - } - - public void testCorsEnabledWithoutAllowOrigins() { - // Set up a HTTP transport with only the CORS enabled setting - Settings settings = Settings.builder() - .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, "remote-host", "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); - } - - public void testCorsEnabledWithAllowOrigins() { - final String originValue = "remote-host"; - // create a http transport with CORS enabled and allow origin configured - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - } - - public void testCorsAllowOriginWithSameHost() { - String originValue = "remote-host"; - String host = "remote-host"; - // create a http transport with CORS enabled - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, originValue, host); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = "http://" + originValue; - response = executeRequest(settings, originValue, host); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = originValue + ":5555"; - host = host + ":5555"; - 
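Both the new Netty4CorsTests earlier in this diff and this deleted copy walk the same-host shortcut step by step: bare host, then an http:// scheme, then an explicit port, then https://. In every case the request counts as same-origin when the Origin header minus its scheme equals the Host header, and the origin is echoed back. An illustrative predicate capturing what the assertions check (not the PR's implementation):

    final class SameOriginCheck {
        static boolean isSameOrigin(String origin, String host) {
            String stripped = origin.replaceFirst("^https?://", "");
            return stripped.equals(host); // host may carry a port, e.g. remote-host:5555
        }
    }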
response = executeRequest(settings, originValue, host); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = originValue.replace("http", "https"); - response = executeRequest(settings, originValue, host); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - } - - public void testThatStringLiteralWorksOnMatch() { - final String originValue = "remote-host"; - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") - .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); - } - - public void testThatAnyOriginWorks() { - final String originValue = Netty4CorsHandler.ANY_ORIGIN; - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); - } - - public void testHeadersSet() { - Settings settings = Settings.builder().build(); - try (Netty4HttpServerTransport httpServerTransport = - new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), - new NullDispatcher())) { - httpServerTransport.start(); - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); - final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); - HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; - - // send a response - Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); - TestResponse resp = new TestResponse(); - final String customHeader = "custom-header"; - final String customHeaderValue = "xyz"; - resp.addHeader(customHeader, customHeaderValue); - channel.sendResponse(resp); - - // inspect what was written - List writtenObjects = writeCapturingChannel.getWrittenObjects(); - assertThat(writtenObjects.size(), is(1)); - HttpResponse response = ((Netty4HttpResponse) writtenObjects.get(0)).getResponse(); - 
assertThat(response.headers().get("non-existent-header"), nullValue()); - assertThat(response.headers().get(customHeader), equalTo(customHeaderValue)); - assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length()))); - assertThat(response.headers().get(HttpHeaderNames.CONTENT_TYPE), equalTo(resp.contentType())); - } - } - - public void testReleaseOnSendToClosedChannel() { - final Settings settings = Settings.builder().build(); - final NamedXContentRegistry registry = xContentRegistry(); - try (Netty4HttpServerTransport httpServerTransport = - new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, registry, new NullDispatcher())) { - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); - HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; - final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); - final TestResponse response = new TestResponse(bigArrays); - assertThat(response.content(), instanceOf(Releasable.class)); - embeddedChannel.close(); - channel.sendResponse(response); - // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released - } - } - - public void testReleaseOnSendToChannelAfterException() throws IOException { - final Settings settings = Settings.builder().build(); - final NamedXContentRegistry registry = xContentRegistry(); - try (Netty4HttpServerTransport httpServerTransport = - new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, registry, new NullDispatcher())) { - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); - HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; - final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); - final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, - JsonXContent.contentBuilder().startObject().endObject()); - assertThat(response.content(), not(instanceOf(Releasable.class))); - - // ensure we have reserved bytes - if (randomBoolean()) { - BytesStreamOutput out = channel.bytesOutput(); - assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); - } else { - try (XContentBuilder builder = channel.newBuilder()) { - // do something builder - builder.startObject().endObject(); - } - } - - channel.sendResponse(response); - // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released - } - } - - public void testConnectionClose() throws Exception { - final Settings settings = Settings.builder().build(); - try (Netty4HttpServerTransport httpServerTransport = - new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), new NullDispatcher())) { - httpServerTransport.start(); - final FullHttpRequest httpRequest; - final boolean close = randomBoolean(); - if (randomBoolean()) { - httpRequest = new 
DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - if (close) { - httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); - } - } else { - httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/"); - if (!close) { - httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE); - } - } - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, embeddedChannel); - - // send a response, the channel close status should match - assertTrue(embeddedChannel.isOpen()); - HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; - final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); - final TestResponse resp = new TestResponse(); - channel.sendResponse(resp); - assertThat(embeddedChannel.isOpen(), equalTo(!close)); - } - } - - private FullHttpResponse executeRequest(final Settings settings, final String host) { - return executeRequest(settings, null, host); - } - - private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) { - // construct request and send it over the transport layer - try (Netty4HttpServerTransport httpServerTransport = - new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), - new NullDispatcher())) { - httpServerTransport.start(); - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - if (originValue != null) { - httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); - } - httpRequest.headers().add(HttpHeaderNames.HOST, host); - final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - final Netty4HttpRequest request = - new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); - HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; - - Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); - channel.sendResponse(new TestResponse()); - - // get the response - List writtenObjects = writeCapturingChannel.getWrittenObjects(); - assertThat(writtenObjects.size(), is(1)); - return ((Netty4HttpResponse) writtenObjects.get(0)).getResponse(); - } - } - - private static class WriteCapturingChannel implements Channel { - - private List writtenObjects = new ArrayList<>(); - - @Override - public ChannelId id() { - return null; - } - - @Override - public EventLoop eventLoop() { - return null; - } - - @Override - public Channel parent() { - return null; - } - - @Override - public ChannelConfig config() { - return null; - } - - @Override - public boolean isOpen() { - return false; - } - - @Override - public boolean isRegistered() { - return false; - } - - @Override - public boolean isActive() { - return false; - } - - @Override - public ChannelMetadata metadata() { - return null; - } - - @Override - public SocketAddress localAddress() { - return null; - } - - @Override - public SocketAddress remoteAddress() { - return null; - } - - @Override - public ChannelFuture closeFuture() { - return null; - } - - @Override - public boolean isWritable() { - return false; - } - - @Override - public long bytesBeforeUnwritable() { - return 0; - } - - @Override - public long bytesBeforeWritable() { - return 0; 
- } - - @Override - public Unsafe unsafe() { - return null; - } - - @Override - public ChannelPipeline pipeline() { - return null; - } - - @Override - public ByteBufAllocator alloc() { - return null; - } - - @Override - public Channel read() { - return null; - } - - @Override - public Channel flush() { - return null; - } - - @Override - public ChannelFuture bind(SocketAddress localAddress) { - return null; - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress) { - return null; - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress) { - return null; - } - - @Override - public ChannelFuture disconnect() { - return null; - } - - @Override - public ChannelFuture close() { - return null; - } - - @Override - public ChannelFuture deregister() { - return null; - } - - @Override - public ChannelFuture bind(SocketAddress localAddress, ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture connect(SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture disconnect(ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture close(ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture deregister(ChannelPromise promise) { - return null; - } - - @Override - public ChannelFuture write(Object msg) { - writtenObjects.add(msg); - return null; - } - - @Override - public ChannelFuture write(Object msg, ChannelPromise promise) { - writtenObjects.add(msg); - return null; - } - - @Override - public ChannelFuture writeAndFlush(Object msg, ChannelPromise promise) { - writtenObjects.add(msg); - return null; - } - - @Override - public ChannelFuture writeAndFlush(Object msg) { - writtenObjects.add(msg); - return null; - } - - @Override - public ChannelPromise newPromise() { - return null; - } - - @Override - public ChannelProgressivePromise newProgressivePromise() { - return null; - } - - @Override - public ChannelFuture newSucceededFuture() { - return null; - } - - @Override - public ChannelFuture newFailedFuture(Throwable cause) { - return null; - } - - @Override - public ChannelPromise voidPromise() { - return null; - } - - @Override - public Attribute attr(AttributeKey key) { - return null; - } - - @Override - public boolean hasAttr(AttributeKey key) { - return false; - } - - @Override - public int compareTo(Channel o) { - return 0; - } - - List getWrittenObjects() { - return writtenObjects; - } - - } - - private static class TestResponse extends RestResponse { - - private final BytesReference reference; - - TestResponse() { - reference = Netty4Utils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); - } - - TestResponse(final BigArrays bigArrays) { - final byte[] bytes; - try { - bytes = "content".getBytes("UTF-8"); - } catch (final UnsupportedEncodingException e) { - throw new AssertionError(e); - } - final ByteArray bigArray = bigArrays.newByteArray(bytes.length); - bigArray.set(0, bytes, 0, bytes.length); - reference = new ReleasablePagedBytesReference(bigArrays, bigArray, bytes.length, Releasables.releaseOnce(bigArray)); - } - - @Override - public String contentType() { - return "text"; - } - - @Override - public BytesReference content() { - return reference; - } - - @Override - public RestStatus status() { - return RestStatus.OK; - } - - } 
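The WriteCapturingChannel stub deleted above implements Netty's entire Channel interface just to record written objects; the replacement tests earlier in this diff get the same capability from EmbeddedChannel, which queues outbound messages for inspection. The core pattern:

    import io.netty.channel.embedded.EmbeddedChannel;

    final class EmbeddedChannelPattern {
        public static void main(String[] args) {
            EmbeddedChannel channel = new EmbeddedChannel(); // add handlers under test here
            channel.writeOutbound("payload");                // push a message through the pipeline
            String written = channel.readOutbound();         // then inspect what reached the wire
            System.out.println(written);                     // -> payload
        }
    }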
- -} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java index f6c5dfd5a50b2..8b3ba19fe0144 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java @@ -19,15 +19,12 @@ package org.elasticsearch.http.netty4; -import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpMethod; @@ -35,7 +32,10 @@ import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http.QueryStringDecoder; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -55,7 +55,6 @@ import java.util.stream.IntStream; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpResponseStatus.OK; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static org.hamcrest.core.Is.is; @@ -191,11 +190,11 @@ public void testPipeliningRequestsAreReleased() throws InterruptedException { ArrayList promises = new ArrayList<>(); for (int i = 1; i < requests.size(); ++i) { - final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); ChannelPromise promise = embeddedChannel.newPromise(); promises.add(promise); - int sequence = requests.get(i).getSequence(); - Netty4HttpResponse resp = new Netty4HttpResponse(sequence, httpResponse); + HttpPipelinedRequest pipelinedRequest = requests.get(i); + Netty4HttpRequest nioHttpRequest = new Netty4HttpRequest(pipelinedRequest.getRequest(), pipelinedRequest.getSequence()); + Netty4HttpResponse resp = nioHttpRequest.createResponse(RestStatus.OK, BytesArray.EMPTY); embeddedChannel.writeAndFlush(resp, promise); } @@ -233,10 +232,10 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) thro } - private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { + private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { @Override - protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { + protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { LastHttpContent request = pipelinedRequest.getRequest(); final QueryStringDecoder decoder; if (request instanceof FullHttpRequest) { @@ -246,9 +245,10 @@ protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedReques } final String uri = decoder.path().replace("/", ""); - final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); - final DefaultFullHttpResponse httpResponse = new 
DefaultFullHttpResponse(HTTP_1_1, OK, content); - httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes()); + final BytesReference content = new BytesArray(uri.getBytes(StandardCharsets.UTF_8)); + Netty4HttpRequest nioHttpRequest = new Netty4HttpRequest(pipelinedRequest.getRequest(), pipelinedRequest.getSequence()); + Netty4HttpResponse httpResponse = nioHttpRequest.createResponse(RestStatus.OK, content); + httpResponse.addHeader(CONTENT_LENGTH.toString(), Integer.toString(content.length())); final CountDownLatch waitingLatch = new CountDownLatch(1); waitingRequests.put(uri, waitingLatch); @@ -260,7 +260,7 @@ protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedReques waitingLatch.await(1000, TimeUnit.SECONDS); final ChannelPromise promise = ctx.newPromise(); eventLoopService.submit(() -> { - ctx.write(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + ctx.write(httpResponse, promise); finishingLatch.countDown(); }); } catch (InterruptedException e) { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index f2b28b909187b..3101f660d056e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -26,22 +26,20 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; -import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -120,7 +118,7 @@ class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { @Override public ChannelHandler configureServerChannelHandler() { - return new CustomHttpChannelHandler(this, executorService, Netty4HttpServerPipeliningTests.this.threadPool.getThreadContext()); + return new CustomHttpChannelHandler(this, executorService); } @Override @@ -135,8 +133,8 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha private final ExecutorService executorService; - CustomHttpChannelHandler(Netty4HttpServerTransport transport, ExecutorService executorService, ThreadContext threadContext) { - super(transport, transport.httpHandlingSettings, threadContext); + 
CustomHttpChannelHandler(Netty4HttpServerTransport transport, ExecutorService executorService) { + super(transport, transport.handlingSettings); this.executorService = executorService; } @@ -187,8 +185,9 @@ public void run() { final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); - final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, buffer); - httpResponse.headers().add(HttpHeaderNames.CONTENT_LENGTH, buffer.readableBytes()); + Netty4HttpRequest httpRequest = new Netty4HttpRequest(fullHttpRequest, pipelinedRequest.getSequence()); + Netty4HttpResponse response = httpRequest.createResponse(RestStatus.OK, new BytesArray(uri.getBytes(StandardCharsets.UTF_8))); + response.headers().add(HttpHeaderNames.CONTENT_LENGTH, buffer.readableBytes()); final boolean slow = uri.matches("/slow/\\d+"); if (slow) { @@ -202,7 +201,7 @@ public void run() { } final ChannelPromise promise = ctx.newPromise(); - ctx.writeAndFlush(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + ctx.writeAndFlush(response, promise); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 5b22409b92da0..bcf28506143bf 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -291,40 +291,6 @@ public void dispatchBadRequest(final RestRequest request, assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); } - public void testDispatchDoesNotModifyThreadContext() throws InterruptedException { - final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { - - @Override - public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { - threadContext.putHeader("foo", "bar"); - threadContext.putTransient("bar", "baz"); - } - - @Override - public void dispatchBadRequest(final RestRequest request, - final RestChannel channel, - final ThreadContext threadContext, - final Throwable cause) { - threadContext.putHeader("foo_bad", "bar"); - threadContext.putTransient("bar_bad", "baz"); - } - - }; - - try (Netty4HttpServerTransport transport = - new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { - transport.start(); - - transport.dispatchRequest(null, null); - assertNull(threadPool.getThreadContext().getHeader("foo")); - assertNull(threadPool.getThreadContext().getTransient("bar")); - - transport.dispatchBadRequest(null, null, null); - assertNull(threadPool.getThreadContext().getHeader("foo_bad")); - assertNull(threadPool.getThreadContext().getTransient("bar_bad")); - } - } - public void testReadTimeout() throws Exception { final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 3fdd3366122cb..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82d83fcac1d9c8948aa0247fc9c87f177ddbd59b \ No 
newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..03f1b7d27aed5 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +b91a260d8d12ee4b3302a63059c73a34de0ce146 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 855d6ebe4aeb0..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73fd4364f2931e7c8303b5927e140a7d21116c36 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..9a5c6669009eb --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +cc1ca9bd9e2c162dd1da8c2e7111913fd8033e48 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 091097f1a8477..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a2c4417fa9a8be078864f590a5a66b98d551cf5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..cbf4f78c31999 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +2fa3662a10a9e085b1c7b87293d727422cbe6224 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index b18addf0b5819..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6fa179924f139a30fc0e5399256e1a44562ed32b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..bd5bf428b6d44 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +60aa50c11857e6739e68936cb45102562b2c46b4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 7b7141b6f407c..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ed135d34d7868b71a725257a46dc8d8735a15d4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..a73900802ace1 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +4586368007785a3be26db4b9ce404ffb8c76f350 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 73be96c477eab..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -875911b36b99c2103719f94559878a0ecb862fb6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..bf0a50f7154e5 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +9c6d030ab2c148df7a6ba73a774ef4b8c720a6cb \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0c85d3f6c8522..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7191628df8cb72382a20da79224aef677117849 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..ba6ceb2aed9d8 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +8275bf8df2644d5fcec2963cf237d14b6e00fefe \ No newline at end of file diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 7daf944f81898..b1c3b62fd6edf 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -53,6 +53,11 @@ test { systemProperty 'tests.artifact', project.name } +check { + // also execute the QA tests when testing the plugin + dependsOn 'qa:amazon-ec2:check' +} + thirdPartyAudit.excludes = [ // classes are missing 'com.amazonaws.jmespath.JmesPathEvaluationVisitor', diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle new file mode 100644 index 0000000000000..90fac9e80cd78 --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.test.AntFixture + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: ':plugins:discovery-ec2', configuration: 'runtime') +} + +final int ec2NumberOfNodes = 3 +File ec2DiscoveryFile = new File(project.buildDir, 'generated-resources/nodes.uri') + +/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/ +task ec2Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, ec2DiscoveryFile.absolutePath +} + +Map expansions = [ + 'expected_nodes': ec2NumberOfNodes +] + +processTestResources { + inputs.properties(expansions) + MavenFilteringHack.filter(it, expansions) +} + +integTestCluster { + dependsOn ec2Fixture + numNodes = ec2NumberOfNodes + plugin ':plugins:discovery-ec2' + keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key' + keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' + setting 'discovery.zen.hosts_provider', 'ec2' + setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}" + unicastTransportUri = { seedNode, node, ant -> return null } + + waitCondition = { node, ant -> + ec2DiscoveryFile.parentFile.mkdirs() + ec2DiscoveryFile.setText(integTest.nodes.collect { n -> "${n.transportUri()}" }.join('\n'), 'UTF-8') + + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/", + dest: tmpFile.toString(), + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java similarity index 83% rename from qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java rename to plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java index b8b1607065cdd..09d5a8d6fdf28 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeTestRankEvalWithMustacheYAMLTestSuiteIT.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2DiscoveryClientYamlTestSuiteIT.java @@ -17,17 +17,16 @@ * under the License. 
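A note on the wiring in the QA build above: integTestCluster's waitCondition writes one transport address per line into the generated nodes.uri file, and the fixture reads that same file when it fabricates a DescribeInstancesResponse, so EC2 discovery resolves to the real test nodes. A minimal sketch of the consuming side, assuming only java.nio.file (the class and method names here are illustrative, not part of the change):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Collections;
    import java.util.List;

    final class NodesFileReader {
        /** Each line is one "host:port" transport address written by waitCondition. */
        static List<String> readAddresses(String nodesUriPath) throws IOException {
            Path nodes = Paths.get(nodesUriPath);
            // The cluster may not have started yet; the fixture treats a missing file as "no instances".
            return Files.exists(nodes) ? Files.readAllLines(nodes) : Collections.emptyList();
        }
    }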
*/ -package org.elasticsearch.index.rankeval; +package org.elasticsearch.discovery.ec2; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; -public class SmokeTestRankEvalWithMustacheYAMLTestSuiteIT extends ESClientYamlSuiteTestCase { +public class AmazonEC2DiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - public SmokeTestRankEvalWithMustacheYAMLTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + public AmazonEC2DiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } @@ -35,5 +34,4 @@ public SmokeTestRankEvalWithMustacheYAMLTestSuiteIT(@Name("yaml") ClientYamlTest public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } - } diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java new file mode 100644 index 0000000000000..0cf4cbdeadb34 --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.discovery.ec2; + +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.fixture.AbstractHttpFixture; + +import javax.xml.XMLConstants; +import javax.xml.stream.XMLOutputFactory; +import javax.xml.stream.XMLStreamWriter; +import java.io.IOException; +import java.io.StringWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.UUID; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * {@link AmazonEC2Fixture} is a fixture that emulates an AWS EC2 service. 
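For orientation before the class body: AbstractHttpFixture subclasses implement handle(Request) and return null for any request they do not serve. A minimal hedged sketch of that contract, modeled on the fixture below (EchoFixture and the /ping path are invented for illustration; Request, Response, and contentType are inherited from the fixture base class):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.elasticsearch.rest.RestStatus;
    import org.elasticsearch.test.fixture.AbstractHttpFixture;

    class EchoFixture extends AbstractHttpFixture {
        EchoFixture(String workingDir) {
            super(workingDir);
        }

        @Override
        protected Response handle(Request request) throws IOException {
            if ("/ping".equals(request.getPath())) {
                byte[] body = "pong".getBytes(StandardCharsets.UTF_8);
                return new Response(RestStatus.OK.getStatus(), contentType("text/plain; charset=UTF-8"), body);
            }
            return null; // unhandled: let the base fixture answer
        }
    }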
+ */ +public class AmazonEC2Fixture extends AbstractHttpFixture { + + private final Path nodes; + + private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) { + super(workingDir); + this.nodes = toPath(Objects.requireNonNull(nodesUriPath)); + } + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonEC2Fixture "); + } + + final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]); + fixture.listen(); + } + + @Override + protected Response handle(final Request request) throws IOException { + if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) { + final String userAgent = request.getHeader("User-Agent"); + if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { + // Simulate an EC2 DescribeInstancesResponse + byte[] responseBody = EMPTY_BYTE; + for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) { + if ("Action".equals(parse.getName())) { + responseBody = generateDescribeInstancesResponse(); + break; + } + } + return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody); + } + } + return null; + } + + /** + * Generates a XML response that describe the EC2 instances + */ + private byte[] generateDescribeInstancesResponse() { + final XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); + xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); + + final StringWriter out = new StringWriter(); + XMLStreamWriter sw; + try { + sw = xmlOutputFactory.createXMLStreamWriter(out); + sw.writeStartDocument(); + + String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; + sw.setDefaultNamespace(namespace); + sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); + { + sw.writeStartElement("requestId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("reservationSet"); + { + if (Files.exists(nodes)) { + for (String address : Files.readAllLines(nodes)) { + + sw.writeStartElement("item"); + { + sw.writeStartElement("reservationId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instancesSet"); + { + sw.writeStartElement("item"); + { + sw.writeStartElement("instanceId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("imageId"); + sw.writeCharacters(UUID.randomUUID().toString()); + sw.writeEndElement(); + + sw.writeStartElement("instanceState"); + { + sw.writeStartElement("code"); + sw.writeCharacters("16"); + sw.writeEndElement(); + + sw.writeStartElement("name"); + sw.writeCharacters("running"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateDnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("dnsName"); + sw.writeCharacters(address); + sw.writeEndElement(); + + sw.writeStartElement("instanceType"); + sw.writeCharacters("m1.medium"); + sw.writeEndElement(); + + sw.writeStartElement("placement"); + { + sw.writeStartElement("availabilityZone"); + sw.writeCharacters("use-east-1e"); + sw.writeEndElement(); + + sw.writeEmptyElement("groupName"); + + sw.writeStartElement("tenancy"); + sw.writeCharacters("default"); + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeStartElement("privateIpAddress"); + sw.writeCharacters(address); + 
sw.writeEndElement(); + + sw.writeStartElement("ipAddress"); + sw.writeCharacters(address); + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + sw.writeEndElement(); + } + } + sw.writeEndElement(); + } + sw.writeEndElement(); + + sw.writeEndDocument(); + sw.flush(); + } + } catch (Exception e) { + throw new RuntimeException(e); + } + return out.toString().getBytes(UTF_8); + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path toPath(final String dir) { + return Paths.get(dir); + } +} diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml new file mode 100644 index 0000000000000..682327b72dd9e --- /dev/null +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/resources/rest-api-spec/test/discovery_ec2/10_basic.yml @@ -0,0 +1,15 @@ +# Integration tests for discovery-ec2 +setup: + - do: + cluster.health: + wait_for_status: green + wait_for_nodes: ${expected_nodes} + +--- +"All nodes are correctly discovered": + + - do: + nodes.info: + metric: [ transport ] + + - match: { _nodes.total: ${expected_nodes} } diff --git a/plugins/discovery-ec2/qa/build.gradle b/plugins/discovery-ec2/qa/build.gradle new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java deleted file mode 100644 index 49fd9de71ecfa..0000000000000 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryClusterFormationTests.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
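One stylistic point worth calling out in the XML generation above (it recurs in the deleted test below): the bare-brace blocks carry no semantics; they are lexical scopes that make each writeStartElement/writeEndElement pair indent like the markup it produces. The idiom in isolation, with arbitrary element names:

    import java.io.StringWriter;
    import javax.xml.stream.XMLOutputFactory;
    import javax.xml.stream.XMLStreamException;
    import javax.xml.stream.XMLStreamWriter;

    final class BraceIndentedXml {
        static String write() throws XMLStreamException {
            StringWriter out = new StringWriter();
            XMLStreamWriter sw = XMLOutputFactory.newFactory().createXMLStreamWriter(out);
            sw.writeStartDocument();
            sw.writeStartElement("outer");
            {   // block exists only so the writer calls mirror the XML nesting
                sw.writeStartElement("inner");
                sw.writeCharacters("value");
                sw.writeEndElement();
            }
            sw.writeEndElement();
            sw.writeEndDocument();
            sw.flush();
            return out.toString();
        }
    }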
- */ - -package org.elasticsearch.discovery.ec2; - -import com.amazonaws.util.IOUtils; -import com.sun.net.httpserver.Headers; -import com.sun.net.httpserver.HttpServer; -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.mocksocket.MockHttpServer; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import javax.xml.XMLConstants; -import javax.xml.stream.XMLOutputFactory; -import javax.xml.stream.XMLStreamException; -import javax.xml.stream.XMLStreamWriter; -import java.io.IOException; -import java.io.OutputStream; -import java.io.StringWriter; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -import static org.hamcrest.Matchers.equalTo; - -@ESIntegTestCase.ClusterScope(supportsDedicatedMasters = false, numDataNodes = 2, numClientNodes = 0) -@SuppressForbidden(reason = "use http server") -// TODO this should be a IT but currently all ITs in this project run against a real cluster -public class Ec2DiscoveryClusterFormationTests extends ESIntegTestCase { - - private static HttpServer httpServer; - private static Path logDir; - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(Ec2DiscoveryPlugin.class); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Path resolve = logDir.resolve(Integer.toString(nodeOrdinal)); - try { - Files.createDirectory(resolve); - } catch (IOException e) { - throw new RuntimeException(e); - } - MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(AwsEc2Service.ACCESS_KEY_SETTING.getKey(), "some_access"); - secureSettings.setString(AwsEc2Service.SECRET_KEY_SETTING.getKey(), "some_secret"); - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), "ec2") - .put("path.logs", resolve) - .put("transport.tcp.port", 0) - .put("node.portsfile", "true") - .put(AwsEc2Service.ENDPOINT_SETTING.getKey(), "http://" + httpServer.getAddress().getHostName() + ":" + - httpServer.getAddress().getPort()) - .setSecureSettings(secureSettings) - .build(); - } - - /** - * Creates mock EC2 endpoint providing the list of started nodes to the DescribeInstances API call - */ - @BeforeClass - public static void startHttpd() throws Exception { - logDir = createTempDir(); - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); - - httpServer.createContext("/", (s) -> { - Headers headers = s.getResponseHeaders(); - headers.add("Content-Type", "text/xml; charset=UTF-8"); - String action = null; - for (NameValuePair parse : URLEncodedUtils.parse(IOUtils.toString(s.getRequestBody()), StandardCharsets.UTF_8)) { - if 
("Action".equals(parse.getName())) { - action = parse.getValue(); - break; - } - } - assertThat(action, equalTo("DescribeInstances")); - - XMLOutputFactory xmlOutputFactory = XMLOutputFactory.newFactory(); - xmlOutputFactory.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true); - StringWriter out = new StringWriter(); - XMLStreamWriter sw; - try { - sw = xmlOutputFactory.createXMLStreamWriter(out); - sw.writeStartDocument(); - - String namespace = "http://ec2.amazonaws.com/doc/2013-02-01/"; - sw.setDefaultNamespace(namespace); - sw.writeStartElement(XMLConstants.DEFAULT_NS_PREFIX, "DescribeInstancesResponse", namespace); - { - sw.writeStartElement("requestId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("reservationSet"); - { - Path[] files = FileSystemUtils.files(logDir); - for (int i = 0; i < files.length; i++) { - Path resolve = files[i].resolve("transport.ports"); - if (Files.exists(resolve)) { - List addresses = Files.readAllLines(resolve); - Collections.shuffle(addresses, random()); - - sw.writeStartElement("item"); - { - sw.writeStartElement("reservationId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instancesSet"); - { - sw.writeStartElement("item"); - { - sw.writeStartElement("instanceId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("imageId"); - sw.writeCharacters(UUID.randomUUID().toString()); - sw.writeEndElement(); - - sw.writeStartElement("instanceState"); - { - sw.writeStartElement("code"); - sw.writeCharacters("16"); - sw.writeEndElement(); - - sw.writeStartElement("name"); - sw.writeCharacters("running"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateDnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("dnsName"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("instanceType"); - sw.writeCharacters("m1.medium"); - sw.writeEndElement(); - - sw.writeStartElement("placement"); - { - sw.writeStartElement("availabilityZone"); - sw.writeCharacters("use-east-1e"); - sw.writeEndElement(); - - sw.writeEmptyElement("groupName"); - - sw.writeStartElement("tenancy"); - sw.writeCharacters("default"); - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeStartElement("privateIpAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - - sw.writeStartElement("ipAddress"); - sw.writeCharacters(addresses.get(0)); - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - sw.writeEndElement(); - } - } - } - sw.writeEndElement(); - } - sw.writeEndElement(); - - sw.writeEndDocument(); - sw.flush(); - - final byte[] responseAsBytes = out.toString().getBytes(StandardCharsets.UTF_8); - s.sendResponseHeaders(200, responseAsBytes.length); - OutputStream responseBody = s.getResponseBody(); - responseBody.write(responseAsBytes); - responseBody.close(); - } catch (XMLStreamException e) { - Loggers.getLogger(Ec2DiscoveryClusterFormationTests.class).error("Failed serializing XML", e); - throw new RuntimeException(e); - } - }); - - httpServer.start(); - } - - @AfterClass - public static void stopHttpd() throws IOException { - for (int i = 0; i < internalCluster().size(); i++) { - // shut them all down otherwise we get spammed with connection refused exceptions - internalCluster().stopRandomDataNode(); - } - httpServer.stop(0); - httpServer = 
null; - logDir = null; - } - - public void testJoin() throws ExecutionException, InterruptedException { - // only wait for the cluster to form - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get()); - // add one more node and wait for it to join - internalCluster().startDataOnlyNode(); - assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get()); - } -} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 05f28e8254aa1..ad81719ebcbb9 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -23,54 +23,38 @@ import io.netty.channel.ChannelHandler; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; -import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.WriteOperation; -import org.elasticsearch.rest.RestRequest; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; - public class HttpReadWriteHandler implements ReadWriteHandler { private final NettyAdaptor adaptor; - private final NioSocketChannel nioChannel; + private final NioHttpChannel nioHttpChannel; private final NioHttpServerTransport transport; - private final HttpHandlingSettings settings; - private final NamedXContentRegistry xContentRegistry; - private final NioCorsConfig corsConfig; - private final ThreadContext threadContext; - - HttpReadWriteHandler(NioSocketChannel nioChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, - NamedXContentRegistry xContentRegistry, NioCorsConfig corsConfig, ThreadContext threadContext) { - this.nioChannel = nioChannel; + + HttpReadWriteHandler(NioHttpChannel nioHttpChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, + NioCorsConfig corsConfig) { + this.nioHttpChannel = nioHttpChannel; this.transport = transport; - this.settings = settings; - this.xContentRegistry = xContentRegistry; - this.corsConfig = corsConfig; - this.threadContext = threadContext; List handlers = new ArrayList<>(5); HttpRequestDecoder decoder = new 
HttpRequestDecoder(settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), @@ -89,12 +73,12 @@ public class HttpReadWriteHandler implements ReadWriteHandler { handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); - adaptor.addCloseListener((v, e) -> nioChannel.close()); + adaptor.addCloseListener((v, e) -> nioHttpChannel.close()); } @Override public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { - int bytesConsumed = adaptor.read(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())); + int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex())); Object message; while ((message = adaptor.pollInboundMessage()) != null) { handleRequest(message); @@ -150,95 +134,22 @@ private void handleRequest(Object msg) { request.headers(), request.trailingHeaders()); - Exception badRequestCause = null; - - /* - * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there - * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we - * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, - * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the - * underlying exception that caused us to treat the request as bad. - */ - final NioHttpRequest httpRequest; - { - NioHttpRequest innerHttpRequest; - try { - innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); - } catch (final RestRequest.ContentTypeHeaderException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); - } catch (final RestRequest.BadParameterException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutParameters(copiedRequest); - } - httpRequest = innerHttpRequest; - } - - /* - * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid - * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an - * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of - * these parameter values. 
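To condense the two comment blocks being removed here: the old handler retried REST-request construction with progressively less input, first dropping the Content-Type header, then dropping all query parameters, so that even a malformed request could still be dispatched as a bad request carrying its original cause. A paraphrase of the removed ladder (the types and helpers are those visible in this hunk, not new API):

    // Not new code: a condensed restatement of the logic deleted below.
    NioHttpRequest httpRequest;
    Exception badRequestCause = null;
    try {
        httpRequest = new NioHttpRequest(xContentRegistry, copiedRequest);
    } catch (RestRequest.ContentTypeHeaderException e) {
        badRequestCause = e;                                  // retry 1: strip Content-Type
        httpRequest = requestWithoutContentTypeHeader(copiedRequest, e);
    } catch (RestRequest.BadParameterException e) {
        badRequestCause = e;                                  // retry 2: strip all parameters
        httpRequest = requestWithoutParameters(copiedRequest);
    }
    // badRequestCause != null means: dispatch as a bad request with that cause attached.

With this change the same capability moves behind HttpRequest.removeHeader and the shared server-side dispatch, so the per-transport ladder is no longer needed.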
- */ - final NioHttpChannel channel; - { - NioHttpChannel innerChannel; - int sequence = pipelinedRequest.getSequence(); - BigArrays bigArrays = transport.getBigArrays(); - try { - innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, corsConfig, threadContext); - } catch (final IllegalArgumentException e) { - if (badRequestCause == null) { - badRequestCause = e; - } else { - badRequestCause.addSuppressed(e); - } - final NioHttpRequest innerRequest = - new NioHttpRequest( - xContentRegistry, - Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters - copiedRequest.uri(), - copiedRequest); - innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, corsConfig, threadContext); - } - channel = innerChannel; - } + NioHttpRequest httpRequest = new NioHttpRequest(copiedRequest, pipelinedRequest.getSequence()); if (request.decoderResult().isFailure()) { - transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } else if (badRequestCause != null) { - transport.dispatchBadRequest(httpRequest, channel, badRequestCause); + Throwable cause = request.decoderResult().cause(); + if (cause instanceof Error) { + ExceptionsHelper.dieOnError(cause); + transport.incomingRequestError(httpRequest, nioHttpChannel, new Exception(cause)); + } else { + transport.incomingRequestError(httpRequest, nioHttpChannel, (Exception) cause); + } } else { - transport.dispatchRequest(httpRequest, channel); + transport.incomingRequest(httpRequest, nioHttpChannel); } } finally { // As we have copied the buffer, we can release the request request.release(); } } - - private NioHttpRequest requestWithoutContentTypeHeader(final FullHttpRequest request, final Exception badRequestCause) { - final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); - headersWithoutContentTypeHeader.add(request.headers()); - headersWithoutContentTypeHeader.remove("Content-Type"); - final FullHttpRequest requestWithoutContentTypeHeader = - new DefaultFullHttpRequest( - request.protocolVersion(), - request.method(), - request.uri(), - request.content(), - headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again - request.trailingHeaders()); // Content-Type can not be a trailing header - try { - return new NioHttpRequest(xContentRegistry, requestWithoutContentTypeHeader); - } catch (final RestRequest.BadParameterException e) { - badRequestCause.addSuppressed(e); - return requestWithoutParameters(requestWithoutContentTypeHeader); - } - } - - private NioHttpRequest requestWithoutParameters(final FullHttpRequest request) { - // remove all parameters as at least one is incorrectly encoded - return new NioHttpRequest(xContentRegistry, Collections.emptyMap(), request.uri(), request); - } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index cf8c92bff905c..41cb72aa32273 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -29,6 +29,7 @@ import io.netty.channel.embedded.EmbeddedChannel; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.WriteOperation; import java.nio.ByteBuffer; @@ -97,6 
+98,13 @@ public int read(ByteBuffer[] buffers) { return byteBuf.readerIndex() - initialReaderIndex; } + public int read(InboundChannelBuffer.Page[] pages) { + ByteBuf byteBuf = PagedByteBuf.byteBufFromPages(pages); + int readableBytes = byteBuf.readableBytes(); + nettyChannel.writeInbound(byteBuf); + return readableBytes; + } + public Object pollInboundMessage() { return nettyChannel.readInbound(); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java index 634421b34ea48..088f0e85dde23 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -19,244 +19,21 @@ package org.elasticsearch.http.nio; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.http.cookie.Cookie; -import io.netty.handler.codec.http.cookie.ServerCookieDecoder; -import io.netty.handler.codec.http.cookie.ServerCookieEncoder; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.http.HttpHandlingSettings; -import org.elasticsearch.http.nio.cors.NioCorsConfig; -import org.elasticsearch.http.nio.cors.NioCorsHandler; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpResponse; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.rest.AbstractRestChannel; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.BiConsumer; +import java.io.IOException; +import java.nio.channels.SocketChannel; -public class NioHttpChannel extends AbstractRestChannel { +public class NioHttpChannel extends NioSocketChannel implements HttpChannel { - private final BigArrays bigArrays; - private final int sequence; - private final NioCorsConfig corsConfig; - private final ThreadContext threadContext; - private final FullHttpRequest nettyRequest; - private final NioSocketChannel nioChannel; - private final boolean resetCookies; - - NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence, - HttpHandlingSettings settings, NioCorsConfig corsConfig, ThreadContext threadContext) { - super(request, settings.getDetailedErrorsEnabled()); - this.nioChannel = nioChannel; - this.bigArrays = bigArrays; - this.sequence = sequence; - this.corsConfig = corsConfig; - 
this.threadContext = threadContext; - this.nettyRequest = request.getRequest(); - this.resetCookies = settings.isResetCookies(); - } - - @Override - public void sendResponse(RestResponse response) { - // if the response object was created upstream, then use it; - // otherwise, create a new one - ByteBuf buffer = ByteBufUtils.toByteBuf(response.content()); - final FullHttpResponse resp; - if (HttpMethod.HEAD.equals(nettyRequest.method())) { - resp = newResponse(Unpooled.EMPTY_BUFFER); - } else { - resp = newResponse(buffer); - } - resp.setStatus(getStatus(response.status())); - - NioCorsHandler.setCorsResponseHeaders(nettyRequest, resp, corsConfig); - - String opaque = nettyRequest.headers().get("X-Opaque-Id"); - if (opaque != null) { - setHeaderField(resp, "X-Opaque-Id", opaque); - } - - // Add all custom headers - addCustomHeaders(resp, response.getHeaders()); - addCustomHeaders(resp, threadContext.getResponseHeaders()); - - ArrayList toClose = new ArrayList<>(3); - - boolean success = false; - try { - // If our response doesn't specify a content-type header, set one - setHeaderField(resp, HttpHeaderNames.CONTENT_TYPE.toString(), response.contentType(), false); - // If our response has no content-length, calculate and set one - setHeaderField(resp, HttpHeaderNames.CONTENT_LENGTH.toString(), String.valueOf(buffer.readableBytes()), false); - - addCookies(resp); - - BytesReference content = response.content(); - if (content instanceof Releasable) { - toClose.add((Releasable) content); - } - BytesStreamOutput bytesStreamOutput = bytesOutputOrNull(); - if (bytesStreamOutput instanceof ReleasableBytesStreamOutput) { - toClose.add((Releasable) bytesStreamOutput); - } - - if (isCloseConnection()) { - toClose.add(nioChannel::close); - } - - BiConsumer listener = (aVoid, ex) -> Releasables.close(toClose); - nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener); - success = true; - } finally { - if (success == false) { - Releasables.close(toClose); - } - } - } - - @Override - protected BytesStreamOutput newBytesOutput() { - return new ReleasableBytesStreamOutput(bigArrays); - } - - private void setHeaderField(HttpResponse resp, String headerField, String value) { - setHeaderField(resp, headerField, value, true); - } - - private void setHeaderField(HttpResponse resp, String headerField, String value, boolean override) { - if (override || !resp.headers().contains(headerField)) { - resp.headers().add(headerField, value); - } - } - - private void addCookies(HttpResponse resp) { - if (resetCookies) { - String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); - if (cookieString != null) { - Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); - if (!cookies.isEmpty()) { - // Reset the cookies if necessary. - resp.headers().set(HttpHeaderNames.SET_COOKIE, ServerCookieEncoder.STRICT.encode(cookies)); - } - } - } - } - - private void addCustomHeaders(HttpResponse response, Map> customHeaders) { - if (customHeaders != null) { - for (Map.Entry> headerEntry : customHeaders.entrySet()) { - for (String headerValue : headerEntry.getValue()) { - setHeaderField(response, headerEntry.getKey(), headerValue); - } - } - } - } - - // Create a new {@link HttpResponse} to transmit the response for the netty request. - private FullHttpResponse newResponse(ByteBuf buffer) { - final boolean http10 = isHttp10(); - final boolean close = isCloseConnection(); - // Build the response object. 
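The removed sendResponse is a compact example of the release-on-failure idiom: collect Releasables while building the response, hand them to the completion listener on the happy path, and free them in finally only if something threw before the message reached the channel. The idiom in isolation (Releasable and Releasables are the real org.elasticsearch.common.lease types; the wrapper class is illustrative):

    import java.util.ArrayList;
    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.lease.Releasables;

    final class ReleaseOnFailure {
        static void sendThenRelease(ArrayList<Releasable> toClose, Runnable send) {
            boolean success = false;
            try {
                send.run();       // on success, the channel's completion listener closes toClose
                success = true;
            } finally {
                if (success == false) {
                    Releasables.close(toClose); // nothing was sent, so release eagerly
                }
            }
        }
    }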
- final HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize - final FullHttpResponse response; - if (http10) { - response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0, status, buffer); - if (!close) { - response.headers().add(HttpHeaderNames.CONNECTION, "Keep-Alive"); - } - } else { - response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status, buffer); - } - return response; - } - - // Determine if the request protocol version is HTTP 1.0 - private boolean isHttp10() { - return nettyRequest.protocolVersion().equals(HttpVersion.HTTP_1_0); - } - - // Determine if the request connection should be closed on completion. - private boolean isCloseConnection() { - final boolean http10 = isHttp10(); - return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || - (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION))); - } - - private static Map MAP; - - static { - EnumMap map = new EnumMap<>(RestStatus.class); - map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); - map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); - map.put(RestStatus.OK, HttpResponseStatus.OK); - map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); - map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); - map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); - map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); - map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); - map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); - map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? 
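This hand-maintained EnumMap, including its lossy entries (MULTI_STATUS, UNPROCESSABLE_ENTITY, LOCKED, and FAILED_DEPENDENCY all collapse to other codes), is deleted by this change. The new NioHttpResponse further down maps statuses numerically instead, which preserves every RestStatus code on the wire even where Netty has no named constant, since HttpResponseStatus.valueOf synthesizes an instance for unknown codes rather than failing:

    import io.netty.handler.codec.http.HttpResponseStatus;
    import org.elasticsearch.rest.RestStatus;

    final class StatusMapping {
        /** The numeric mapping the new NioHttpResponse constructor relies on. */
        static HttpResponseStatus toNetty(RestStatus status) {
            return HttpResponseStatus.valueOf(status.getStatus());
        }
    }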
- map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); - map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); - map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); - map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); - map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); - map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); - map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); - map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); - map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); - map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); - map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); - map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); - map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); - map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); - map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); - map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); - map.put(RestStatus.GONE, HttpResponseStatus.GONE); - map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); - map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); - map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); - map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG); - map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); - map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); - map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); - map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); - map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); - map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); - map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); - map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); - map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); - map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); - MAP = Collections.unmodifiableMap(map); + NioHttpChannel(SocketChannel socketChannel) throws IOException { + super(socketChannel); } - private static HttpResponseStatus getStatus(RestStatus status) { - return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); + public void sendResponse(HttpResponse response, ActionListener listener) { + getContext().sendMessage(response, ActionListener.toBiConsumer(listener)); } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java index 1eb63364f995a..977092ddac0aa 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java @@ -68,7 +68,7 @@ public 
void write(final ChannelHandlerContext ctx, final Object msg, final Chann List> readyResponses = aggregator.write(response, listener); success = true; for (Tuple responseToWrite : readyResponses) { - ctx.write(responseToWrite.v1().getResponse(), responseToWrite.v2()); + ctx.write(responseToWrite.v1(), responseToWrite.v2()); } } catch (IllegalStateException e) { ctx.channel().close(); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java index 4dcd6ba19e06b..08937593f3ba6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java @@ -19,13 +19,20 @@ package org.elasticsearch.http.nio; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpRequest; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; import java.util.AbstractMap; import java.util.Collection; @@ -35,25 +42,17 @@ import java.util.Set; import java.util.stream.Collectors; -public class NioHttpRequest extends RestRequest { +public class NioHttpRequest implements HttpRequest { private final FullHttpRequest request; private final BytesReference content; + private final HttpHeadersMap headers; + private final int sequence; - NioHttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request) { - super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); - this.request = request; - if (request.content().isReadable()) { - this.content = ByteBufUtils.toBytesReference(request.content()); - } else { - this.content = BytesArray.EMPTY; - } - - } - - NioHttpRequest(NamedXContentRegistry xContentRegistry, Map params, String uri, FullHttpRequest request) { - super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + NioHttpRequest(FullHttpRequest request, int sequence) { this.request = request; + headers = new HttpHeadersMap(request.headers()); + this.sequence = sequence; if (request.content().isReadable()) { this.content = ByteBufUtils.toBytesReference(request.content()); } else { @@ -62,38 +61,38 @@ public class NioHttpRequest extends RestRequest { } @Override - public Method method() { + public RestRequest.Method method() { HttpMethod httpMethod = request.method(); if (httpMethod == HttpMethod.GET) - return Method.GET; + return RestRequest.Method.GET; if (httpMethod == HttpMethod.POST) - return Method.POST; + return RestRequest.Method.POST; if (httpMethod == HttpMethod.PUT) - return Method.PUT; + return RestRequest.Method.PUT; if (httpMethod == HttpMethod.DELETE) - return Method.DELETE; + return RestRequest.Method.DELETE; if (httpMethod == HttpMethod.HEAD) { - return Method.HEAD; + return RestRequest.Method.HEAD; } if (httpMethod == HttpMethod.OPTIONS) { - return 
Method.OPTIONS; + return RestRequest.Method.OPTIONS; } if (httpMethod == HttpMethod.PATCH) { - return Method.PATCH; + return RestRequest.Method.PATCH; } if (httpMethod == HttpMethod.TRACE) { - return Method.TRACE; + return RestRequest.Method.TRACE; } if (httpMethod == HttpMethod.CONNECT) { - return Method.CONNECT; + return RestRequest.Method.CONNECT; } throw new IllegalArgumentException("Unexpected http method: " + httpMethod); @@ -105,19 +104,65 @@ public String uri() { } @Override - public boolean hasContent() { - return content.length() > 0; + public BytesReference content() { + return content; } + @Override - public BytesReference content() { - return content; + public final Map> getHeaders() { + return headers; + } + + @Override + public List strictCookies() { + String cookieString = request.headers().get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + return ServerCookieEncoder.STRICT.encode(cookies); + } + } + return Collections.emptyList(); + } + + @Override + public HttpVersion protocolVersion() { + if (request.protocolVersion().equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_0)) { + return HttpRequest.HttpVersion.HTTP_1_0; + } else if (request.protocolVersion().equals(io.netty.handler.codec.http.HttpVersion.HTTP_1_1)) { + return HttpRequest.HttpVersion.HTTP_1_1; + } else { + throw new IllegalArgumentException("Unexpected http protocol version: " + request.protocolVersion()); + } } - public FullHttpRequest getRequest() { + @Override + public HttpRequest removeHeader(String header) { + HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove(header); + HttpHeaders trailingHeaders = new DefaultHttpHeaders(); + trailingHeaders.add(request.trailingHeaders()); + trailingHeaders.remove(header); + FullHttpRequest requestWithoutHeader = new DefaultFullHttpRequest(request.protocolVersion(), request.method(), request.uri(), + request.content(), headersWithoutContentTypeHeader, trailingHeaders); + return new NioHttpRequest(requestWithoutHeader, sequence); + } + + @Override + public NioHttpResponse createResponse(RestStatus status, BytesReference content) { + return new NioHttpResponse(this, status, content); + } + + public FullHttpRequest nettyRequest() { return request; } + int sequence() { + return sequence; + } + /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. 
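Taken together with the Netty4 test changes earlier in this patch, the request/response flow is now symmetrical across both transports: wrap the decoded Netty request together with its pipelining sequence number, then derive the response from the request so the protocol version and sequence travel with it. A minimal end-to-end sketch using only types from this diff (it assumes the caller sits in org.elasticsearch.http.nio, since the wrapper constructors are package-private; the request construction is illustrative):

    import io.netty.handler.codec.http.DefaultFullHttpRequest;
    import io.netty.handler.codec.http.FullHttpRequest;
    import io.netty.handler.codec.http.HttpMethod;
    import io.netty.handler.codec.http.HttpVersion;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.rest.RestStatus;

    final class RequestResponseFlow {
        static NioHttpResponse respond(FullHttpRequest netty, int sequence) {
            NioHttpRequest request = new NioHttpRequest(netty, sequence);
            // The response inherits the request's protocol version and pipeline sequence.
            NioHttpResponse response = request.createResponse(RestStatus.OK, new BytesArray("ok"));
            response.addHeader("Content-Length", "2");
            return response;
        }

        public static void main(String[] args) {
            FullHttpRequest netty = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
            System.out.println(respond(netty, 0).getSequence()); // 0: pipelining order preserved
        }
    }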
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java index 4b634994b4557..d67494667384a 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java @@ -19,19 +19,40 @@ package org.elasticsearch.http.nio; -import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.http.HttpPipelinedMessage; +import org.elasticsearch.http.HttpResponse; +import org.elasticsearch.rest.RestStatus; -public class NioHttpResponse extends HttpPipelinedMessage { +public class NioHttpResponse extends DefaultFullHttpResponse implements HttpResponse, HttpPipelinedMessage { - private final FullHttpResponse response; + private final int sequence; + private final NioHttpRequest request; - public NioHttpResponse(int sequence, FullHttpResponse response) { - super(sequence); - this.response = response; + NioHttpResponse(NioHttpRequest request, RestStatus status, BytesReference content) { + super(request.nettyRequest().protocolVersion(), HttpResponseStatus.valueOf(status.getStatus()), ByteBufUtils.toByteBuf(content)); + this.sequence = request.sequence(); + this.request = request; } - public FullHttpResponse getResponse() { - return response; + @Override + public void addHeader(String name, String value) { + headers().add(name, value); + } + + @Override + public boolean containsHeader(String name) { + return headers().contains(name); + } + + @Override + public int getSequence() { + return sequence; + } + + public NioHttpRequest getRequest() { + return request; } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index 57aaebb16a1a2..ba51f7c684818 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -32,17 +32,18 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.NetworkExceptionHelper; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.BindHttpException; -import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.nio.cors.NioCorsConfig; @@ -53,17 +54,18 @@ import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioChannel; import org.elasticsearch.nio.NioGroup; +import 
org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.nio.NioSelector; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; @@ -104,11 +106,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); - private final BigArrays bigArrays; - private final ThreadPool threadPool; - private final NamedXContentRegistry xContentRegistry; - - private final HttpHandlingSettings httpHandlingSettings; + private final PageCacheRecycler pageCacheRecycler; private final boolean tcpNoDelay; private final boolean tcpKeepAlive; @@ -122,18 +120,16 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private HttpChannelFactory channelFactory; private final NioCorsConfig corsConfig; - public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, - NamedXContentRegistry xContentRegistry, HttpServerTransport.Dispatcher dispatcher) { - super(settings, networkService, threadPool, dispatcher); - this.bigArrays = bigArrays; - this.threadPool = threadPool; - this.xContentRegistry = xContentRegistry; + public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, + HttpServerTransport.Dispatcher dispatcher) { + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); + this.pageCacheRecycler = pageCacheRecycler; ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); - this.httpHandlingSettings = HttpHandlingSettings.fromSettings(settings);; this.corsConfig = buildCorsConfig(settings); this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); @@ -148,10 +144,6 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents); } - BigArrays getBigArrays() { - return bigArrays; - } - public Logger getLogger() { return logger; } @@ -335,20 +327,24 @@ private void acceptChannel(NioSocketChannel socketChannel) { socketChannels.add(socketChannel); } - private class HttpChannelFactory extends ChannelFactory { + private class HttpChannelFactory extends ChannelFactory { private HttpChannelFactory() { super(new RawChannelFactory(tcpNoDelay, tcpKeepAlive, reuseAddress, tcpSendBufferSize, tcpReceiveBufferSize)); } @Override - public NioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { - NioSocketChannel nioChannel = new NioSocketChannel(channel); + public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { 
+ NioHttpChannel nioChannel = new NioHttpChannel(channel); + java.util.function.Supplier pageSupplier = () -> { + Recycler.V bytes = pageCacheRecycler.bytePage(false); + return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); + }; HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel,NioHttpServerTransport.this, - httpHandlingSettings, xContentRegistry, corsConfig, threadPool.getThreadContext()); + handlingSettings, corsConfig); Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); SocketChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, httpReadWritePipeline, - InboundChannelBuffer.allocatingInstance()); + new InboundChannelBuffer(pageSupplier)); nioChannel.setContext(context); return nioChannel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/PagedByteBuf.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/PagedByteBuf.java new file mode 100644 index 0000000000000..40f3aeecfbc94 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/PagedByteBuf.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.buffer.UnpooledHeapByteBuf; +import org.elasticsearch.nio.InboundChannelBuffer; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class PagedByteBuf extends UnpooledHeapByteBuf { + + private final Runnable releasable; + + private PagedByteBuf(byte[] array, Runnable releasable) { + super(UnpooledByteBufAllocator.DEFAULT, array, array.length); + this.releasable = releasable; + } + + static ByteBuf byteBufFromPages(InboundChannelBuffer.Page[] pages) { + int componentCount = pages.length; + if (componentCount == 0) { + return Unpooled.EMPTY_BUFFER; + } else if (componentCount == 1) { + return byteBufFromPage(pages[0]); + } else { + int maxComponents = Math.max(16, componentCount); + final List components = new ArrayList<>(componentCount); + for (InboundChannelBuffer.Page page : pages) { + components.add(byteBufFromPage(page)); + } + return new CompositeByteBuf(UnpooledByteBufAllocator.DEFAULT, false, maxComponents, components); + } + } + + private static ByteBuf byteBufFromPage(InboundChannelBuffer.Page page) { + ByteBuffer buffer = page.getByteBuffer(); + assert buffer.isDirect() == false && buffer.hasArray() : "Must be a heap buffer with an array"; + int offset = buffer.arrayOffset() + buffer.position(); + PagedByteBuf newByteBuf = new PagedByteBuf(buffer.array(), page::close); + return newByteBuf.slice(offset, buffer.remaining()); + } + + + @Override + protected void deallocate() { + try { + super.deallocate(); + } finally { + releasable.run(); + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java index 6358510703779..98ae2d523ca81 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java @@ -22,6 +22,7 @@ import io.netty.channel.ChannelDuplexHandler; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaders; @@ -30,6 +31,7 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; import org.elasticsearch.common.Strings; +import org.elasticsearch.http.nio.NioHttpResponse; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -76,6 +78,14 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception ctx.fireChannelRead(msg); } + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + assert msg instanceof NioHttpResponse : "Invalid message type: " + msg.getClass(); + NioHttpResponse response = (NioHttpResponse) msg; + setCorsResponseHeaders(response.getRequest().nettyRequest(), response, config); + ctx.write(response, promise); + } + public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, NioCorsConfig config) { if (!config.isCorsSupportEnabled()) { return; diff --git 
a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 1cc94f18dd3c1..1da8e909b2dd8 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -67,12 +67,13 @@ public Map> getTransports(Settings settings, ThreadP @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME, - () -> new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher)); + () -> new NioHttpServerTransport(settings, networkService, bigArrays, pageCacheRecycler, threadPool, xContentRegistry, + dispatcher)); } } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index 6ad53521ee12a..5bda7e1b83d81 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -23,29 +23,31 @@ import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestEncoder; -import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.http.HttpResponse; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; +import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.junit.Before; import 
org.mockito.ArgumentCaptor; @@ -55,6 +57,9 @@ import java.util.List; import java.util.function.BiConsumer; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; @@ -64,7 +69,12 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -72,7 +82,7 @@ public class HttpReadWriteHandlerTests extends ESTestCase { private HttpReadWriteHandler handler; - private NioSocketChannel nioSocketChannel; + private NioHttpChannel nioHttpChannel; private NioHttpServerTransport transport; private final RequestEncoder requestEncoder = new RequestEncoder(); @@ -96,15 +106,13 @@ public void setMocks() { SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings), SETTING_PIPELINING_MAX_EVENTS.getDefault(settings), SETTING_CORS_ENABLED.getDefault(settings)); - ThreadContext threadContext = new ThreadContext(settings); - nioSocketChannel = mock(NioSocketChannel.class); - handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, - NioCorsConfigBuilder.forAnyOrigin().build(), threadContext); + nioHttpChannel = mock(NioHttpChannel.class); + handler = new HttpReadWriteHandler(nioHttpChannel, transport, httpHandlingSettings, NioCorsConfigBuilder.forAnyOrigin().build()); } public void testSuccessfulDecodeHttpRequest() throws IOException { String uri = "localhost:9090/" + randomAlphaOfLength(8); - HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); + io.netty.handler.codec.http.HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); ByteBuf buf = requestEncoder.encode(httpRequest); int slicePoint = randomInt(buf.writerIndex() - 1); @@ -113,22 +121,21 @@ public void testSuccessfulDecodeHttpRequest() throws IOException { ByteBuf slicedBuf2 = buf.retainedSlice(slicePoint, buf.writerIndex()); handler.consumeReads(toChannelBuffer(slicedBuf)); - verify(transport, times(0)).dispatchRequest(any(RestRequest.class), any(RestChannel.class)); + verify(transport, times(0)).incomingRequest(any(HttpRequest.class), any(NioHttpChannel.class)); handler.consumeReads(toChannelBuffer(slicedBuf2)); - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(RestRequest.class); - verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class)); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpRequest.class); + verify(transport).incomingRequest(requestCaptor.capture(), any(NioHttpChannel.class)); - 
NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue(); - FullHttpRequest nettyHttpRequest = nioHttpRequest.getRequest(); - assertEquals(httpRequest.protocolVersion(), nettyHttpRequest.protocolVersion()); - assertEquals(httpRequest.method(), nettyHttpRequest.method()); + HttpRequest nioHttpRequest = requestCaptor.getValue(); + assertEquals(HttpRequest.HttpVersion.HTTP_1_1, nioHttpRequest.protocolVersion()); + assertEquals(RestRequest.Method.GET, nioHttpRequest.method()); } public void testDecodeHttpRequestError() throws IOException { String uri = "localhost:9090/" + randomAlphaOfLength(8); - HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); + io.netty.handler.codec.http.HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri); ByteBuf buf = requestEncoder.encode(httpRequest); buf.setByte(0, ' '); @@ -137,15 +144,15 @@ public void testDecodeHttpRequestError() throws IOException { handler.consumeReads(toChannelBuffer(buf)); - ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(Throwable.class); - verify(transport).dispatchBadRequest(any(RestRequest.class), any(RestChannel.class), exceptionCaptor.capture()); + ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(Exception.class); + verify(transport).incomingRequestError(any(HttpRequest.class), any(NioHttpChannel.class), exceptionCaptor.capture()); assertTrue(exceptionCaptor.getValue() instanceof IllegalArgumentException); } public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() throws IOException { String uri = "localhost:9090/" + randomAlphaOfLength(8); - HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uri, false); + io.netty.handler.codec.http.HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uri, false); HttpUtil.setContentLength(httpRequest, 1025); HttpUtil.setKeepAlive(httpRequest, false); @@ -153,60 +160,176 @@ public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() t handler.consumeReads(toChannelBuffer(buf)); - verify(transport, times(0)).dispatchBadRequest(any(), any(), any()); - verify(transport, times(0)).dispatchRequest(any(), any()); + verify(transport, times(0)).incomingRequestError(any(), any(), any()); + verify(transport, times(0)).incomingRequest(any(), any()); List flushOperations = handler.pollFlushOperations(); assertFalse(flushOperations.isEmpty()); FlushOperation flushOperation = flushOperations.get(0); - HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite())); + FullHttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite())); assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion()); assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status()); flushOperation.getListener().accept(null, null); // Since we have keep-alive set to false, we should close the channel after the response has been // flushed - verify(nioSocketChannel).close(); + verify(nioHttpChannel).close(); } @SuppressWarnings("unchecked") public void testEncodeHttpResponse() throws IOException { prepareHandlerForResponse(handler); - FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); - NioHttpResponse pipelinedResponse = new NioHttpResponse(0, fullHttpResponse); + DefaultFullHttpRequest nettyRequest = new 
DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + NioHttpRequest nioHttpRequest = new NioHttpRequest(nettyRequest, 0); + NioHttpResponse httpResponse = nioHttpRequest.createResponse(RestStatus.OK, BytesArray.EMPTY); + httpResponse.addHeader(HttpHeaderNames.CONTENT_LENGTH.toString(), "0"); SocketChannelContext context = mock(SocketChannelContext.class); - HttpWriteOperation writeOperation = new HttpWriteOperation(context, pipelinedResponse, mock(BiConsumer.class)); + HttpWriteOperation writeOperation = new HttpWriteOperation(context, httpResponse, mock(BiConsumer.class)); List flushOperations = handler.writeToBytes(writeOperation); - HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite())); + FullHttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite())); assertEquals(HttpResponseStatus.OK, response.status()); assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion()); } - private FullHttpRequest prepareHandlerForResponse(HttpReadWriteHandler adaptor) throws IOException { - HttpMethod method = HttpMethod.GET; - HttpVersion version = HttpVersion.HTTP_1_1; + public void testCorsEnabledWithoutAllowOrigins() throws IOException { + // Set up an HTTP transport with only the CORS enabled setting + Settings settings = Settings.builder() + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) + .build(); + io.netty.handler.codec.http.HttpResponse response = executeCorsRequest(settings, "remote-host", "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); + } + + public void testCorsEnabledWithAllowOrigins() throws IOException { + final String originValue = "remote-host"; + // create an HTTP transport with CORS enabled and allow origin configured + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + io.netty.handler.codec.http.HttpResponse response = executeCorsRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testCorsAllowOriginWithSameHost() throws IOException { + String originValue = "remote-host"; + String host = "remote-host"; + // create an HTTP transport with CORS enabled + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .build(); + FullHttpResponse response = executeCorsRequest(settings, originValue, host); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = "http://" + originValue; + response = executeCorsRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue + ":5555"; + host = host + ":5555"; + response = executeCorsRequest(settings, originValue, host);
+ assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue.replace("http", "https"); + response = executeCorsRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testThatStringLiteralWorksOnMatch() throws IOException { + final String originValue = "remote-host"; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + io.netty.handler.codec.http.HttpResponse response = executeCorsRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); + } + + public void testThatAnyOriginWorks() throws IOException { + final String originValue = NioCorsHandler.ANY_ORIGIN; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + io.netty.handler.codec.http.HttpResponse response = executeCorsRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); + } + + private FullHttpResponse executeCorsRequest(final Settings settings, final String originValue, final String host) throws IOException { + HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig nioCorsConfig = NioHttpServerTransport.buildCorsConfig(settings); + HttpReadWriteHandler handler = new HttpReadWriteHandler(nioHttpChannel, transport, httpHandlingSettings, nioCorsConfig); + prepareHandlerForResponse(handler); + DefaultFullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (originValue != null) { + httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); + } + httpRequest.headers().add(HttpHeaderNames.HOST, host); + NioHttpRequest nioHttpRequest = new NioHttpRequest(httpRequest, 0); + BytesArray content = new BytesArray("content"); + HttpResponse response = nioHttpRequest.createResponse(RestStatus.OK, content); + response.addHeader("Content-Length", Integer.toString(content.length())); + + SocketChannelContext context = mock(SocketChannelContext.class); + List flushOperations = handler.writeToBytes(handler.createWriteOperation(context, response, (v, e) -> {})); + + FlushOperation flushOperation = flushOperations.get(0); + return 
responseDecoder.decode(Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite())); + } + + + + private NioHttpRequest prepareHandlerForResponse(HttpReadWriteHandler handler) throws IOException { + HttpMethod method = randomBoolean() ? HttpMethod.GET : HttpMethod.HEAD; + HttpVersion version = randomBoolean() ? HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1; String uri = "http://localhost:9090/" + randomAlphaOfLength(8); - HttpRequest request = new DefaultFullHttpRequest(version, method, uri); + io.netty.handler.codec.http.HttpRequest request = new DefaultFullHttpRequest(version, method, uri); ByteBuf buf = requestEncoder.encode(request); handler.consumeReads(toChannelBuffer(buf)); - ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(RestRequest.class); - verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class)); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(NioHttpRequest.class); + verify(transport, atLeastOnce()).incomingRequest(requestCaptor.capture(), any(HttpChannel.class)); - NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue(); - FullHttpRequest requestParsed = nioHttpRequest.getRequest(); - assertNotNull(requestParsed); - assertEquals(requestParsed.method(), method); - assertEquals(requestParsed.protocolVersion(), version); - assertEquals(requestParsed.uri(), uri); - return requestParsed; + NioHttpRequest nioHttpRequest = requestCaptor.getValue(); + assertNotNull(nioHttpRequest); + assertEquals(method.name(), nioHttpRequest.method().name()); + if (version == HttpVersion.HTTP_1_1) { + assertEquals(HttpRequest.HttpVersion.HTTP_1_1, nioHttpRequest.protocolVersion()); + } else { + assertEquals(HttpRequest.HttpVersion.HTTP_1_0, nioHttpRequest.protocolVersion()); + } + assertEquals(nioHttpRequest.uri(), uri); + return nioHttpRequest; } private InboundChannelBuffer toChannelBuffer(ByteBuf buf) { @@ -226,11 +349,13 @@ private InboundChannelBuffer toChannelBuffer(ByteBuf buf) { return buffer; } + private static final int MAX = 16 * 1024 * 1024; + private static class RequestEncoder { - private final EmbeddedChannel requestEncoder = new EmbeddedChannel(new HttpRequestEncoder()); + private final EmbeddedChannel requestEncoder = new EmbeddedChannel(new HttpRequestEncoder(), new HttpObjectAggregator(MAX)); - private ByteBuf encode(HttpRequest httpRequest) { + private ByteBuf encode(io.netty.handler.codec.http.HttpRequest httpRequest) { requestEncoder.writeOutbound(httpRequest); return requestEncoder.readOutbound(); } @@ -238,9 +363,9 @@ private ByteBuf encode(HttpRequest httpRequest) { private static class ResponseDecoder { - private final EmbeddedChannel responseDecoder = new EmbeddedChannel(new HttpResponseDecoder()); + private final EmbeddedChannel responseDecoder = new EmbeddedChannel(new HttpResponseDecoder(), new HttpObjectAggregator(MAX)); - private HttpResponse decode(ByteBuf response) { + private FullHttpResponse decode(ByteBuf response) { responseDecoder.writeInbound(response); return responseDecoder.readInbound(); } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java deleted file mode 100644 index 5fa0a7ae0a679..0000000000000 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http.nio; - -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpVersion; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.MockPageCacheRecycler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.http.HttpHandlingSettings; -import org.elasticsearch.http.HttpTransportSettings; -import org.elasticsearch.http.nio.cors.NioCorsConfig; -import org.elasticsearch.http.nio.cors.NioCorsHandler; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.junit.After; -import org.junit.Before; -import org.mockito.ArgumentCaptor; - -import java.io.IOException; -import java.nio.channels.ClosedChannelException; -import java.nio.charset.StandardCharsets; -import java.util.function.BiConsumer; - -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static 
org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class NioHttpChannelTests extends ESTestCase { - - private ThreadPool threadPool; - private MockBigArrays bigArrays; - private NioSocketChannel nioChannel; - private SocketChannelContext channelContext; - - @Before - public void setup() throws Exception { - nioChannel = mock(NioSocketChannel.class); - channelContext = mock(SocketChannelContext.class); - when(nioChannel.getContext()).thenReturn(channelContext); - threadPool = new TestThreadPool("test"); - bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - } - - @After - public void shutdown() throws Exception { - if (threadPool != null) { - threadPool.shutdownNow(); - } - } - - public void testResponse() { - final FullHttpResponse response = executeRequest(Settings.EMPTY, "request-host"); - assertThat(response.content(), equalTo(ByteBufUtils.toByteBuf(new TestResponse().content()))); - } - - public void testCorsEnabledWithoutAllowOrigins() { - // Set up a HTTP transport with only the CORS enabled setting - Settings settings = Settings.builder() - .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, "remote-host", "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); - } - - public void testCorsEnabledWithAllowOrigins() { - final String originValue = "remote-host"; - // create a http transport with CORS enabled and allow origin configured - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - } - - public void testCorsAllowOriginWithSameHost() { - String originValue = "remote-host"; - String host = "remote-host"; - // create a http transport with CORS enabled - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, originValue, host); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = "http://" + originValue; - response = executeRequest(settings, originValue, host); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = originValue + ":5555"; - host = host + ":5555"; - response = executeRequest(settings, originValue, host); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - - originValue = originValue.replace("http", "https"); - response = executeRequest(settings, originValue, host); - 
assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - } - - public void testThatStringLiteralWorksOnMatch() { - final String originValue = "remote-host"; - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") - .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); - } - - public void testThatAnyOriginWorks() { - final String originValue = NioCorsHandler.ANY_ORIGIN; - Settings settings = Settings.builder() - .put(SETTING_CORS_ENABLED.getKey(), true) - .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) - .build(); - HttpResponse response = executeRequest(settings, originValue, "request-host"); - // inspect response and validate - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); - String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); - assertThat(allowedOrigins, is(originValue)); - assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); - } - - public void testHeadersSet() { - Settings settings = Settings.builder().build(); - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); - final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); - HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); - NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); - - // send a response - NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, corsConfig, - threadPool.getThreadContext()); - TestResponse resp = new TestResponse(); - final String customHeader = "custom-header"; - final String customHeaderValue = "xyz"; - resp.addHeader(customHeader, customHeaderValue); - channel.sendResponse(resp); - - // inspect what was written - ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Object.class); - verify(channelContext).sendMessage(responseCaptor.capture(), any()); - Object nioResponse = responseCaptor.getValue(); - HttpResponse response = ((NioHttpResponse) nioResponse).getResponse(); - assertThat(response.headers().get("non-existent-header"), nullValue()); - assertThat(response.headers().get(customHeader), equalTo(customHeaderValue)); - assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length()))); - assertThat(response.headers().get(HttpHeaderNames.CONTENT_TYPE), equalTo(resp.contentType())); - } - - @SuppressWarnings("unchecked") - public void testReleaseInListener() throws IOException { - final Settings settings = Settings.builder().build(); - final NamedXContentRegistry registry = 
xContentRegistry(); - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - final NioHttpRequest request = new NioHttpRequest(registry, httpRequest); - HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); - NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); - - NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, - corsConfig, threadPool.getThreadContext()); - final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, - JsonXContent.contentBuilder().startObject().endObject()); - assertThat(response.content(), not(instanceOf(Releasable.class))); - - // ensure we have reserved bytes - if (randomBoolean()) { - BytesStreamOutput out = channel.bytesOutput(); - assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); - } else { - try (XContentBuilder builder = channel.newBuilder()) { - // do something builder - builder.startObject().endObject(); - } - } - - channel.sendResponse(response); - Class> listenerClass = (Class>) (Class) BiConsumer.class; - ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); - verify(channelContext).sendMessage(any(), listenerCaptor.capture()); - BiConsumer listener = listenerCaptor.getValue(); - if (randomBoolean()) { - listener.accept(null, null); - } else { - listener.accept(null, new ClosedChannelException()); - } - // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released - } - - - @SuppressWarnings("unchecked") - public void testConnectionClose() throws Exception { - final Settings settings = Settings.builder().build(); - final FullHttpRequest httpRequest; - final boolean close = randomBoolean(); - if (randomBoolean()) { - httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - if (close) { - httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); - } - } else { - httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/"); - if (!close) { - httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE); - } - } - final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); - - HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); - NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); - - NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, - corsConfig, threadPool.getThreadContext()); - final TestResponse resp = new TestResponse(); - channel.sendResponse(resp); - Class> listenerClass = (Class>) (Class) BiConsumer.class; - ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); - verify(channelContext).sendMessage(any(), listenerCaptor.capture()); - BiConsumer listener = listenerCaptor.getValue(); - listener.accept(null, null); - if (close) { - verify(nioChannel, times(1)).close(); - } else { - verify(nioChannel, times(0)).close(); - } - } - - private FullHttpResponse executeRequest(final Settings settings, final String host) { - return executeRequest(settings, null, host); - } - - private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) { - // construct request and send it over the transport layer - final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - if 
(originValue != null) { - httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); - } - httpRequest.headers().add(HttpHeaderNames.HOST, host); - final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); - - HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); - NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); - NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, httpHandlingSettings, corsConfig, - threadPool.getThreadContext()); - channel.sendResponse(new TestResponse()); - - // get the response - ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Object.class); - verify(channelContext, atLeastOnce()).sendMessage(responseCaptor.capture(), any()); - return ((NioHttpResponse) responseCaptor.getValue()).getResponse(); - } - - private static class TestResponse extends RestResponse { - - private final BytesReference reference; - - TestResponse() { - reference = ByteBufUtils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); - } - - @Override - public String contentType() { - return "text"; - } - - @Override - public BytesReference content() { - return reference; - } - - @Override - public RestStatus status() { - return RestStatus.OK; - } - - } -} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java index 94d7db171a563..5f2784a356714 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -19,15 +19,12 @@ package org.elasticsearch.http.nio; -import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpMethod; @@ -35,7 +32,10 @@ import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http.QueryStringDecoder; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -55,7 +55,6 @@ import java.util.stream.IntStream; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpResponseStatus.OK; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static org.hamcrest.core.Is.is; @@ -190,11 +189,11 @@ public void testPipeliningRequestsAreReleased() throws InterruptedException { ArrayList promises = new ArrayList<>(); for (int i = 1; i < requests.size(); ++i) { - final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); ChannelPromise promise = embeddedChannel.newPromise(); promises.add(promise); - int sequence = requests.get(i).getSequence(); - NioHttpResponse resp = new 
NioHttpResponse(sequence, httpResponse); + HttpPipelinedRequest pipelinedRequest = requests.get(i); + NioHttpRequest nioHttpRequest = new NioHttpRequest(pipelinedRequest.getRequest(), pipelinedRequest.getSequence()); + NioHttpResponse resp = nioHttpRequest.createResponse(RestStatus.OK, BytesArray.EMPTY); embeddedChannel.writeAndFlush(resp, promise); } @@ -231,10 +230,10 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) thro } - private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { + private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { @Override - protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { + protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { LastHttpContent request = pipelinedRequest.getRequest(); final QueryStringDecoder decoder; if (request instanceof FullHttpRequest) { @@ -244,9 +243,10 @@ protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedReques } final String uri = decoder.path().replace("/", ""); - final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); - final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK, content); - httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes()); + final BytesReference content = new BytesArray(uri.getBytes(StandardCharsets.UTF_8)); + NioHttpRequest nioHttpRequest = new NioHttpRequest(pipelinedRequest.getRequest(), pipelinedRequest.getSequence()); + NioHttpResponse httpResponse = nioHttpRequest.createResponse(RestStatus.OK, content); + httpResponse.addHeader(CONTENT_LENGTH.toString(), Integer.toString(content.length())); final CountDownLatch waitingLatch = new CountDownLatch(1); waitingRequests.put(uri, waitingLatch); @@ -258,7 +258,7 @@ protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedReques waitingLatch.await(1000, TimeUnit.SECONDS); final ChannelPromise promise = ctx.newPromise(); eventLoopService.submit(() -> { - ctx.write(new NioHttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + ctx.write(httpResponse, promise); finishingLatch.countDown(); }); } catch (InterruptedException e) { diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index c43fc7d072360..a0cb74f7cd205 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -88,12 +88,14 @@ public class NioHttpServerTransportTests extends ESTestCase { private NetworkService networkService; private ThreadPool threadPool; private MockBigArrays bigArrays; + private MockPageCacheRecycler pageRecycler; @Before public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); threadPool = new TestThreadPool("test"); - bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + pageRecycler = new MockPageCacheRecycler(Settings.EMPTY); + bigArrays = new MockBigArrays(pageRecycler, new NoneCircuitBreakerService()); } @After @@ -186,7 +188,7 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC throw new AssertionError(); } }; - try (NioHttpServerTransport transport = new 
NioHttpServerTransport(settings, networkService, bigArrays, threadPool, + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, threadPool, xContentRegistry(), dispatcher)) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -210,13 +212,13 @@ public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadC } public void testBindUnavailableAddress() { - try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, - xContentRegistry(), new NullDispatcher())) { + try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, pageRecycler, + threadPool, xContentRegistry(), new NullDispatcher())) { transport.start(); TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); - try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, - xContentRegistry(), new NullDispatcher())) { + try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, + threadPool, xContentRegistry(), new NullDispatcher())) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); } @@ -259,8 +261,8 @@ public void dispatchBadRequest(final RestRequest request, settings = Settings.builder().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); } - try (NioHttpServerTransport transport = - new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, + threadPool, xContentRegistry(), dispatcher)) { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); @@ -279,41 +281,7 @@ public void dispatchBadRequest(final RestRequest request, assertNotNull(causeReference.get()); assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); } - - public void testDispatchDoesNotModifyThreadContext() throws InterruptedException { - final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { - - @Override - public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { - threadContext.putHeader("foo", "bar"); - threadContext.putTransient("bar", "baz"); - } - - @Override - public void dispatchBadRequest(final RestRequest request, - final RestChannel channel, - final ThreadContext threadContext, - final Throwable cause) { - threadContext.putHeader("foo_bad", "bar"); - threadContext.putTransient("bar_bad", "baz"); - } - - }; - - try (NioHttpServerTransport transport = - new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { - transport.start(); - - transport.dispatchRequest(null, null); - assertNull(threadPool.getThreadContext().getHeader("foo")); - assertNull(threadPool.getThreadContext().getTransient("bar")); - - transport.dispatchBadRequest(null, null, null); - 
assertNull(threadPool.getThreadContext().getHeader("foo_bad")); - assertNull(threadPool.getThreadContext().getTransient("bar_bad")); - } - } - + // public void testReadTimeout() throws Exception { // final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { // diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/PagedByteBufTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/PagedByteBufTests.java new file mode 100644 index 0000000000000..15bd18ecf6959 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/PagedByteBufTests.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.test.ESTestCase; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicInteger; + +public class PagedByteBufTests extends ESTestCase { + + public void testReleasingPage() { + AtomicInteger integer = new AtomicInteger(0); + int pageCount = randomInt(10) + 1; + ArrayList pages = new ArrayList<>(); + for (int i = 0; i < pageCount; ++i) { + pages.add(new InboundChannelBuffer.Page(ByteBuffer.allocate(10), integer::incrementAndGet)); + } + + ByteBuf byteBuf = PagedByteBuf.byteBufFromPages(pages.toArray(new InboundChannelBuffer.Page[0])); + + assertEquals(0, integer.get()); + byteBuf.retain(); + byteBuf.release(); + assertEquals(0, integer.get()); + ByteBuf secondBuf = byteBuf.retainedSlice(); + byteBuf.release(); + assertEquals(0, integer.get()); + secondBuf.release(); + assertEquals(pageCount, integer.get()); + } + + public void testBytesAreUsed() { + byte[] bytes1 = new byte[10]; + byte[] bytes2 = new byte[10]; + + for (int i = 0; i < 10; ++i) { + bytes1[i] = (byte) i; + } + + for (int i = 10; i < 20; ++i) { + bytes2[i - 10] = (byte) i; + } + + InboundChannelBuffer.Page[] pages = new InboundChannelBuffer.Page[2]; + pages[0] = new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes1), () -> {}); + pages[1] = new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes2), () -> {}); + + ByteBuf byteBuf = PagedByteBuf.byteBufFromPages(pages); + assertEquals(20, byteBuf.readableBytes()); + + for (int i = 0; i < 20; ++i) { + assertEquals((byte) i, byteBuf.getByte(i)); + } + + InboundChannelBuffer.Page[] pages2 = new InboundChannelBuffer.Page[2]; + ByteBuffer firstBuffer = ByteBuffer.wrap(bytes1); + firstBuffer.position(2); + ByteBuffer secondBuffer = ByteBuffer.wrap(bytes2); + secondBuffer.limit(8); + pages2[0] = new InboundChannelBuffer.Page(firstBuffer, () -> {}); + pages2[1] = new InboundChannelBuffer.Page(secondBuffer, () -> {}); + + ByteBuf byteBuf2 = 
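/* position 2 on the first page and limit 8 on the second hide two bytes at each end, leaving 16 of the 20 bytes readable */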
PagedByteBuf.byteBufFromPages(pages2); + assertEquals(16, byteBuf2.readableBytes()); + + for (int i = 2; i < 18; ++i) { + assertEquals((byte) i, byteBuf2.getByte(i - 2)); + } + } +} diff --git a/qa/build.gradle b/qa/build.gradle index 709c309359ecf..0336b947d06aa 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -5,6 +5,20 @@ subprojects { Project subproj -> subproj.tasks.withType(RestIntegTestTask) { subproj.extensions.configure("${it.name}Cluster") { cluster -> cluster.distribution = System.getProperty('tests.distribution', 'oss-zip') + if (cluster.distribution == 'zip') { + /* + * Add Elastic's repositories so we can resolve older versions of the + * default distribution. Those aren't in maven central. + */ + repositories { + maven { + url "https://artifacts.elastic.co/maven" + } + maven { + url "https://snapshots.elastic.co/maven" + } + } + } } } } diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 026b46e21bc6d..514091409334e 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -163,3 +163,31 @@ setup() { assert_file_exist /var/log/elasticsearch/gc.log.0.current stop_elasticsearch_service } + +# Ensures that if $MAX_MAP_COUNT is less than the value set on the OS +# it will be updated +@test "[INIT.D] sysctl is run when the value set is too small" { + # intentionally a ridiculously low number + sysctl -q -w vm.max_map_count=100 + start_elasticsearch_service + max_map_count=$(sysctl -n vm.max_map_count) + stop_elasticsearch_service + + [ $max_map_count = 262144 ] + +} + +# Ensures that if $MAX_MAP_COUNT is greater than the value set on the OS +# we do not attempt to update it. This should cover equality as well, as I +# think we can trust that equality comparisons work as intended. +@test "[INIT.D] sysctl is not run when it already has a larger or equal value set" { + # intentionally set to the default +1 + sysctl -q -w vm.max_map_count=262145 + start_elasticsearch_service + max_map_count=$(sysctl -n vm.max_map_count) + stop_elasticsearch_service + + # default value +1 + [ $max_map_count = 262145 ] + +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index c2259c7b55d14..3ee0340387496 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -198,9 +198,7 @@ header. The warnings must match exactly. Using it looks like this: .... If the arguments to `do` include `node_selector` then the request is only -sent to nodes that match the `node_selector`. Currently only the `version` -selector is supported and it has the same logic as the `version` field in -`skip`. It looks like this: +sent to nodes that match the `node_selector`. It looks like this: .... "test id": @@ -216,6 +214,19 @@ selector is supported and it has the same logic as the `version` field in body: { foo: bar } .... +If you list multiple selectors then the request will only go to nodes that +match all of those selectors. The following selectors are supported: +* `version`: Only nodes whose version is within the range will receive the +request. The syntax for the pattern is the same as when `version` is within +`skip`. +* `attribute`: Only nodes that have an attribute with the provided name and +value match; a combined example is sketched below, followed by the +single-attribute form.
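+A sketch of the two selectors combined (the `rack` attribute name is
+invented purely for illustration; any attribute declared on the node works):
+....
+      node_selector:
+          version: "6.2.0 - "
+          attribute:
+              rack: rack_one
+....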
Looks like: +.... + node_selector: + attribute: + name: value +.... + === `set` For some tests, it is necessary to extract a value from the previous `response`, in diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml index 8fafd9ef250aa..6f7c5a6009386 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -79,7 +79,6 @@ indices.get_alias: index: test_index - - match: {test_index.aliases.test_alias: {}} - match: {test_index.aliases.test_blias.search_routing: b} - match: {test_index.aliases.test_blias.index_routing: b} - is_false: test_index.aliases.test_blias.filter @@ -87,6 +86,30 @@ - is_false: test_index.aliases.test_clias.index_routing - is_false: test_index.aliases.test_clias.search_routing +--- +"Create index with write aliases": + - skip: + version: " - 6.99.99" + reason: is_write_index is not implemented in ES <= 6.x + - do: + indices.create: + index: test_index + body: + aliases: + test_alias: {} + test_blias: + is_write_index: false + test_clias: + is_write_index: true + + - do: + indices.get_alias: + index: test_index + + - is_false: test_index.aliases.test_alias.is_write_index + - is_false: test_index.aliases.test_blias.is_write_index + - is_true: test_index.aliases.test_clias.is_write_index + --- "Create index with no type mappings": - do: diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index db3885eb62fab..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cd761f40c4a89ed977167f0518d12e409eaf3d8 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..4c0db7a735c8d --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +557d62d2b13d3dcb1810a1633e22625e42425425 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index bd8711a4d53d9..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c93ed67599d345b9359586248ab92342d7d3033 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..0579316096a72 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +d3755ad4c98b49fe5055b32358e3071727177c03 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 36bf03bbbdb54..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -003ed080e5184661e606091cd321c229798b22f8 \ No newline at end of file diff --git 
a/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..134072bc13701 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0f940ee9c7ac7..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b4be9f96edfd3dbcff5aa9b3f0914e86eb9cc51 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..8a3327cc8a227 --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +b62ebd53bbefb2f59cd246157a6768cae8a5a3a1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index fdc9336fb2ce2..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a5dcceb5bc017cee6ab5d3ee1943aca1ac6fe074 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..75fb5a7755639 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +cba0fd4ccb98db8a72287a95d6b653e455f9eeb3 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 62726ca415a48..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b59e7441f121da969bef8eef2c0c61743b4230a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..01e0197bc1713 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +5127ed0b7516f8b28d84e837df4f33c67e361f6c \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index a68093d2fc42e..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46736dbb07b432f0a7c1b3080f62932c483e5cb9 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..3d6069f2a5c8b --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +45c7b13aae1104f9f5f0fca0606e5741309c8d74 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 
23e2b68f3dfcf..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee203718d525da0c6258a51a5a32d877089fe5af \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..a74be59aea39c --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +2540c4b5d9dca8a39a3b4d58efe4ab484df7254f \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 5bac053813ea2..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf17a332d8e42a45e8f013d5df408f4391d2620a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..cf26412b63f80 --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +e9d0c0c020917d4bf9b590526866ff5547dbaa17 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 471aa797028a7..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04832303d70502d2ece44501cb1716f42e24fe35 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..63533b774673f --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +50969cdb7279047fbec94dda6e7d74d1c73c07f8 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 486dafc10c73f..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -639313e3a9573779b6a28b45a7f57fc1f73ffa46 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..4eab31d62bd41 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +94524b293572b1f0d01a0faeeade1ff24713f966 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0a083b5a078ac..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6144b493ba3588a638858d0058054758acc619b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..ae5a2ea0375fd --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ 
+878db723e41ece636ed338c4ef374e900f221a14 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 851b0d76d3e7a..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d00c6b8bbbbb496aecd555406267fee9e0af914 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..9f5129d89056a --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c8dc85c32aeac6ff320aa6a9ea57881ad4847a55 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 22ce3c7244338..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -159cdb6d36845690cb1972d02cc0b472bb14b7f3 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..02fcef681fc30 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +203d8d22ab172e624784a5fdeaecdd01ae25fb3d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 0724381bcc6a6..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af1dd0218d58990cca5c1592d9722e67d233c996 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..a7daa7ff02a38 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +4d6cf8fa1064a86991d5cd12a2ed32119ac91212 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java index 9172500a8cb50..10ee8877fc9c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.alias; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -49,6 +50,7 @@ public class Alias implements Streamable, ToXContentFragment { private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing"); private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing"); + private static final ParseField IS_WRITE_INDEX = new ParseField("is_write_index"); private String name; @@ -61,6 +63,9 @@ 
public class Alias implements Streamable, ToXContentFragment { @Nullable private String searchRouting; + @Nullable + private Boolean writeIndex; + private Alias() { } @@ -167,6 +172,21 @@ public Alias searchRouting(String searchRouting) { return this; } + /** + * @return the write index flag for the alias + */ + public Boolean writeIndex() { + return writeIndex; + } + + /** + * Sets whether an alias is pointing to a write-index + */ + public Alias writeIndex(@Nullable Boolean writeIndex) { + this.writeIndex = writeIndex; + return this; + } + /** * Allows to read an alias from the provided input stream */ @@ -182,6 +202,11 @@ public void readFrom(StreamInput in) throws IOException { filter = in.readOptionalString(); indexRouting = in.readOptionalString(); searchRouting = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + writeIndex = in.readOptionalBoolean(); + } else { + writeIndex = null; + } } @Override @@ -190,6 +215,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(filter); out.writeOptionalString(indexRouting); out.writeOptionalString(searchRouting); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeOptionalBoolean(writeIndex); + } } /** @@ -219,6 +247,10 @@ public static Alias fromXContent(XContentParser parser) throws IOException { } else if (SEARCH_ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { alias.searchRouting(parser.text()); } + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + if (IS_WRITE_INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + alias.writeIndex(parser.booleanValue()); + } } } return alias; @@ -245,6 +277,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } + builder.field(IS_WRITE_INDEX.getPreferredName(), writeIndex); + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 6332f50c1452e..00e3f7e32df3b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.alias; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.AliasesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -84,6 +85,7 @@ public static class AliasActions implements AliasesRequest, Writeable, ToXConten private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField INDEX_ROUTING = new ParseField("index_routing", "indexRouting", "index-routing"); private static final ParseField SEARCH_ROUTING = new ParseField("search_routing", "searchRouting", "search-routing"); + private static final ParseField IS_WRITE_INDEX = new ParseField("is_write_index"); private static final ParseField ADD = new ParseField("add"); private static final ParseField REMOVE = new ParseField("remove"); @@ -179,6 +181,7 @@ private static ObjectParser parser(String name, Supplier REMOVE_PARSER = parser(REMOVE.getPreferredName(), AliasActions::remove); private static final ObjectParser REMOVE_INDEX_PARSER = parser(REMOVE_INDEX.getPreferredName(), @@ -215,6 +218,7 @@ private 
static ObjectParser parser(String name, Supplier implements Writeable, ToXContentFragment { + + public static final ParseField CONDITION_FIELD = new ParseField("met_conditions"); + public static final ParseField TIME_FIELD = new ParseField("time"); + + @SuppressWarnings("unchecked") + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("rollover_info", false, + (a, alias) -> new RolloverInfo(alias, (List) a[0], (Long) a[1])); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), + (p, c, n) -> p.namedObject(Condition.class, n, c), CONDITION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_FIELD); + } + + private final String alias; + private final List metConditions; + private final long time; + + public RolloverInfo(String alias, List metConditions, long time) { + this.alias = alias; + this.metConditions = metConditions; + this.time = time; + } + + public RolloverInfo(StreamInput in) throws IOException { + this.alias = in.readString(); + this.time = in.readVLong(); + this.metConditions = in.readNamedWriteableList(Condition.class); + } + + public static RolloverInfo parse(XContentParser parser, String alias) { + return PARSER.apply(parser, alias); + } + + public String getAlias() { + return alias; + } + + public List getMetConditions() { + return metConditions; + } + + public long getTime() { + return time; + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(RolloverInfo::new, in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(alias); + out.writeVLong(time); + out.writeNamedWriteableList(metConditions); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(alias); + builder.startObject(CONDITION_FIELD.getPreferredName()); + for (Condition condition : metConditions) { + condition.toXContent(builder, params); + } + builder.endObject(); + builder.field(TIME_FIELD.getPreferredName(), time); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(alias, metConditions, time); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverInfo other = (RolloverInfo) obj; + return Objects.equals(alias, other.alias) && + Objects.equals(metConditions, other.metConditions) && + Objects.equals(time, other.time); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index a5385c42aa0af..c7780d41fabd4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.AliasAction; @@ -131,7 +132,9 @@ public void 
onResponse(IndicesStatsResponse statsResponse) { new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false)); return; } - if (conditionResults.size() == 0 || conditionResults.values().stream().anyMatch(result -> result)) { + List metConditions = rolloverRequest.getConditions().values().stream() + .filter(condition -> conditionResults.get(condition.toString())).collect(Collectors.toList()); + if (conditionResults.size() == 0 || metConditions.size() > 0) { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName, rolloverRequest); createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> { @@ -141,13 +144,33 @@ public void onResponse(IndicesStatsResponse statsResponse) { rolloverRequest), ActionListener.wrap(aliasClusterStateUpdateResponse -> { if (aliasClusterStateUpdateResponse.isAcknowledged()) { - activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, - rolloverRequest.getCreateIndexRequest().waitForActiveShards(), - rolloverRequest.masterNodeTimeout(), - isShardsAcknowledged -> listener.onResponse(new RolloverResponse( - sourceIndexName, rolloverIndexName, conditionResults, false, true, true, - isShardsAcknowledged)), - listener::onFailure); + clusterService.submitStateUpdateTask("update_rollover_info", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + RolloverInfo rolloverInfo = new RolloverInfo(rolloverRequest.getAlias(), metConditions, + threadPool.absoluteTimeInMillis()); + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentState.metaData()) + .put(IndexMetaData.builder(currentState.metaData().index(sourceIndexName)) + .putRolloverInfo(rolloverInfo))).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, + rolloverRequest.getCreateIndexRequest().waitForActiveShards(), + rolloverRequest.masterNodeTimeout(), + isShardsAcknowledged -> listener.onResponse(new RolloverResponse( + sourceIndexName, rolloverIndexName, conditionResults, false, true, true, + isShardsAcknowledged)), + listener::onFailure); + } + }); } else { listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, false, true, false, false)); @@ -173,7 +196,7 @@ public void onFailure(Exception e) { static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesUpdateRequest(String oldIndex, String newIndex, RolloverRequest request) { List actions = unmodifiableList(Arrays.asList( - new AliasAction.Add(newIndex, request.getAlias(), null, null, null), + new AliasAction.Add(newIndex, request.getAlias(), null, null, null, null), new AliasAction.Remove(oldIndex, request.getAlias()))); final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions) .ackTimeout(request.ackTimeout()) diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 8ef6e4cd15bff..ecdecc4457bdd 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ 
-46,6 +46,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.node.InternalSettingsPreparer; import org.elasticsearch.node.Node; @@ -150,9 +151,11 @@ private static ClientTemplate buildTemplate(Settings providedSettings, Settings SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter); SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class)); + IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); List entries = new ArrayList<>(); entries.addAll(NetworkModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables()); + entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(ClusterModule.getNamedWriteables()); entries.addAll(pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.getNamedWriteables().stream()) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index ff49d072815fb..436ae79c10319 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -51,7 +51,7 @@ public String getIndex() { /** * Apply the action. - * + * * @param aliasValidator call to validate a new alias before adding it to the builder * @param metadata metadata builder for the changes made by all actions as part of this request * @param index metadata for the index being changed @@ -64,7 +64,7 @@ public String getIndex() { */ @FunctionalInterface public interface NewAliasValidator { - void validate(String alias, @Nullable String indexRouting, @Nullable String filter); + void validate(String alias, @Nullable String indexRouting, @Nullable String filter, @Nullable Boolean writeIndex); } /** @@ -82,10 +82,14 @@ public static class Add extends AliasAction { @Nullable private final String searchRouting; + @Nullable + private final Boolean writeIndex; + /** * Build the operation. 
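* The filter, routing values and {@code writeIndex} flag may each be null;
* {@code writeIndex}, when non-null, marks whether this alias should treat
* the index as its write index.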
*/ - public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting, @Nullable String searchRouting) { + public Add(String index, String alias, @Nullable String filter, @Nullable String indexRouting, + @Nullable String searchRouting, @Nullable Boolean writeIndex) { super(index); if (false == Strings.hasText(alias)) { throw new IllegalArgumentException("[alias] is required"); @@ -94,6 +98,7 @@ public Add(String index, String alias, @Nullable String filter, @Nullable String this.filter = filter; this.indexRouting = indexRouting; this.searchRouting = searchRouting; + this.writeIndex = writeIndex; } /** @@ -103,6 +108,10 @@ public String getAlias() { return alias; } + public Boolean writeIndex() { + return writeIndex; + } + @Override boolean removeIndex() { return false; @@ -110,15 +119,18 @@ boolean removeIndex() { @Override boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, IndexMetaData index) { - aliasValidator.validate(alias, indexRouting, filter); + aliasValidator.validate(alias, indexRouting, filter, writeIndex); + AliasMetaData newAliasMd = AliasMetaData.newAliasMetaDataBuilder(alias).filter(filter).indexRouting(indexRouting) - .searchRouting(searchRouting).build(); + .searchRouting(searchRouting).writeIndex(writeIndex).build(); + // Check if this alias already exists AliasMetaData currentAliasMd = index.getAliases().get(alias); if (currentAliasMd != null && currentAliasMd.equals(newAliasMd)) { // It already exists, ignore it return false; } + metadata.put(IndexMetaData.builder(index).putAlias(newAliasMd)); return true; } @@ -182,4 +194,4 @@ boolean apply(NewAliasValidator aliasValidator, MetaData.Builder metadata, Index throw new UnsupportedOperationException(); } } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 945c94bcd642d..29455123287a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -20,8 +20,10 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -55,7 +57,10 @@ public class AliasMetaData extends AbstractDiffable implements To private final Set searchRoutingValues; - private AliasMetaData(String alias, CompressedXContent filter, String indexRouting, String searchRouting) { + @Nullable + private final Boolean writeIndex; + + private AliasMetaData(String alias, CompressedXContent filter, String indexRouting, String searchRouting, Boolean writeIndex) { this.alias = alias; this.filter = filter; this.indexRouting = indexRouting; @@ -65,10 +70,11 @@ private AliasMetaData(String alias, CompressedXContent filter, String indexRouti } else { searchRoutingValues = emptySet(); } + this.writeIndex = writeIndex; } private AliasMetaData(AliasMetaData aliasMetaData, String alias) { - this(alias, aliasMetaData.filter(), aliasMetaData.indexRouting(), aliasMetaData.searchRouting()); + this(alias, aliasMetaData.filter(), aliasMetaData.indexRouting(), aliasMetaData.searchRouting(), 
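/* the copy constructor now preserves the write-index flag as well */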
aliasMetaData.writeIndex()); } public String alias() { @@ -111,6 +117,10 @@ public Set searchRoutingValues() { return searchRoutingValues; } + public Boolean writeIndex() { + return writeIndex; + } + public static Builder builder(String alias) { return new Builder(alias); } @@ -138,6 +148,8 @@ public boolean equals(Object o) { if (indexRouting != null ? !indexRouting.equals(that.indexRouting) : that.indexRouting != null) return false; if (searchRouting != null ? !searchRouting.equals(that.searchRouting) : that.searchRouting != null) return false; + if (writeIndex != null ? writeIndex != that.writeIndex : that.writeIndex != null) + return false; return true; } @@ -148,6 +160,7 @@ public int hashCode() { result = 31 * result + (filter != null ? filter.hashCode() : 0); result = 31 * result + (indexRouting != null ? indexRouting.hashCode() : 0); result = 31 * result + (searchRouting != null ? searchRouting.hashCode() : 0); + result = 31 * result + (writeIndex != null ? writeIndex.hashCode() : 0); return result; } @@ -173,6 +186,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeOptionalBoolean(writeIndex()); + } } public AliasMetaData(StreamInput in) throws IOException { @@ -194,6 +210,11 @@ public AliasMetaData(StreamInput in) throws IOException { searchRouting = null; searchRoutingValues = emptySet(); } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + writeIndex = in.readOptionalBoolean(); + } else { + writeIndex = null; + } } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -221,6 +242,9 @@ public static class Builder { private String searchRouting; + @Nullable + private Boolean writeIndex; + public Builder(String alias) { this.alias = alias; @@ -231,6 +255,7 @@ public Builder(AliasMetaData aliasMetaData) { filter = aliasMetaData.filter(); indexRouting = aliasMetaData.indexRouting(); searchRouting = aliasMetaData.searchRouting(); + writeIndex = aliasMetaData.writeIndex(); } public String alias() { @@ -284,8 +309,13 @@ public Builder searchRouting(String searchRouting) { return this; } + public Builder writeIndex(@Nullable Boolean writeIndex) { + this.writeIndex = writeIndex; + return this; + } + public AliasMetaData build() { - return new AliasMetaData(alias, filter, indexRouting, searchRouting); + return new AliasMetaData(alias, filter, indexRouting, searchRouting, writeIndex); } public static void toXContent(AliasMetaData aliasMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { @@ -307,6 +337,10 @@ public static void toXContent(AliasMetaData aliasMetaData, XContentBuilder build builder.field("search_routing", aliasMetaData.searchRouting()); } + if (aliasMetaData.writeIndex() != null) { + builder.field("is_write_index", aliasMetaData.writeIndex()); + } + builder.endObject(); } @@ -343,6 +377,10 @@ public static AliasMetaData fromXContent(XContentParser parser) throws IOExcepti } } else if (token == XContentParser.Token.START_ARRAY) { parser.skipChildren(); + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + if ("is_write_index".equals(currentFieldName)) { + builder.writeIndex(parser.booleanValue()); + } } } return builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java index 786bd9af78a4c..d8bb04a1a39c3 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java @@ -19,12 +19,16 @@ package org.elasticsearch.cluster.metadata; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.stream.Collectors; /** * Encapsulates the {@link IndexMetaData} instances of a concrete index or indices an alias is pointing to. @@ -78,6 +82,7 @@ class Alias implements AliasOrIndex { private final String aliasName; private final List referenceIndexMetaDatas; + private SetOnce writeIndex = new SetOnce<>(); public Alias(AliasMetaData aliasMetaData, IndexMetaData indexMetaData) { this.aliasName = aliasMetaData.getAlias(); @@ -90,11 +95,21 @@ public boolean isAlias() { return true; } + public String getAliasName() { + return aliasName; + } + @Override public List getIndices() { return referenceIndexMetaDatas; } + + @Nullable + public IndexMetaData getWriteIndex() { + return writeIndex.get(); + } + /** * Returns the unique alias metadata per concrete index. * @@ -138,5 +153,20 @@ void addIndex(IndexMetaData indexMetaData) { this.referenceIndexMetaDatas.add(indexMetaData); } + public void computeAndValidateWriteIndex() { + List writeIndices = referenceIndexMetaDatas.stream() + .filter(idxMeta -> Boolean.TRUE.equals(idxMeta.getAliases().get(aliasName).writeIndex())) + .collect(Collectors.toList()); + if (referenceIndexMetaDatas.size() == 1) { + writeIndex.set(referenceIndexMetaDatas.get(0)); + } else if (writeIndices.size() == 1) { + writeIndex.set(writeIndices.get(0)); + } else if (writeIndices.size() > 1) { + List writeIndicesStrings = writeIndices.stream() + .map(i -> i.getIndex().getName()).collect(Collectors.toList()); + throw new IllegalStateException("alias [" + aliasName + "] has more than one write index [" + + Strings.collectionToCommaDelimitedString(writeIndicesStrings) + "]"); + } + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 7f1348dd1587f..33e1687e241fa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -57,7 +57,7 @@ public AliasValidator(Settings settings) { * @throws IllegalArgumentException if the alias is not valid */ public void validateAlias(Alias alias, String index, MetaData metaData) { - validateAlias(alias.name(), index, alias.indexRouting(), name -> metaData.index(name)); + validateAlias(alias.name(), index, alias.indexRouting(), metaData::index); } /** @@ -66,7 +66,7 @@ public void validateAlias(Alias alias, String index, MetaData metaData) { * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) { - validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), name -> metaData.index(name)); + validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData::index); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index c871dd6183042..9e4f849787867 100644 
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -25,6 +25,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; @@ -294,6 +295,7 @@ public Iterator> settings() { static final String KEY_STATE = "state"; static final String KEY_MAPPINGS = "mappings"; static final String KEY_ALIASES = "aliases"; + static final String KEY_ROLLOVER_INFOS = "rollover_info"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String INDEX_STATE_FILE_PREFIX = "state-"; @@ -331,13 +333,14 @@ public Iterator> settings() { private final Version indexUpgradedVersion; private final ActiveShardCount waitForActiveShards; + private final ImmutableOpenMap rolloverInfos; private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customs, ImmutableOpenIntMap> inSyncAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, - int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards) { + int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) { this.index = index; this.version = version; @@ -362,6 +365,7 @@ private IndexMetaData(Index index, long version, long[] primaryTerms, State stat this.routingFactor = routingNumShards / numberOfShards; this.routingPartitionSize = routingPartitionSize; this.waitForActiveShards = waitForActiveShards; + this.rolloverInfos = rolloverInfos; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -517,6 +521,10 @@ public ImmutableOpenIntMap> getInSyncAllocationIds() { return inSyncAllocationIds; } + public ImmutableOpenMap getRolloverInfos() { + return rolloverInfos; + } + public Set inSyncAllocationIds(int shardId) { assert shardId >= 0 && shardId < numberOfShards; return inSyncAllocationIds.get(shardId); @@ -587,6 +595,9 @@ public boolean equals(Object o) { if (!inSyncAllocationIds.equals(that.inSyncAllocationIds)) { return false; } + if (rolloverInfos.equals(that.rolloverInfos) == false) { + return false; + } return true; } @@ -603,6 +614,7 @@ public int hashCode() { result = 31 * result + Long.hashCode(routingNumShards); result = 31 * result + Arrays.hashCode(primaryTerms); result = 31 * result + inSyncAllocationIds.hashCode(); + result = 31 * result + rolloverInfos.hashCode(); return result; } @@ -638,6 +650,7 @@ private static class IndexMetaDataDiff implements Diff { private final Diff> aliases; private final Diff> customs; private final Diff>> inSyncAllocationIds; + private final Diff> rolloverInfos; IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { index = after.index.getName(); @@ -651,6 +664,7 @@ private static class IndexMetaDataDiff implements Diff { customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); inSyncAllocationIds = 
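/* unchanged; the new rolloverInfos diff is registered immediately after */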
DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); + rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); } IndexMetaDataDiff(StreamInput in) throws IOException { @@ -679,6 +693,13 @@ public Diff readDiff(StreamInput in, String key) throws IOException { }); inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + rolloverInfos = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), RolloverInfo::new, + RolloverInfo::readDiffFrom); + } else { + ImmutableOpenMap emptyMap = ImmutableOpenMap.of(); + rolloverInfos = DiffableUtils.diff(emptyMap, emptyMap, DiffableUtils.getStringKeySerializer()); + } } @Override @@ -693,6 +714,9 @@ public void writeTo(StreamOutput out) throws IOException { aliases.writeTo(out); customs.writeTo(out); inSyncAllocationIds.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + rolloverInfos.writeTo(out); + } } @Override @@ -707,6 +731,7 @@ public IndexMetaData apply(IndexMetaData part) { builder.aliases.putAll(aliases.apply(part.aliases)); builder.customs.putAll(customs.apply(part.customs)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); + builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); return builder.build(); } } @@ -740,6 +765,12 @@ public static IndexMetaData readFrom(StreamInput in) throws IOException { Set allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key); builder.putInSyncAllocationIds(key, allocationIds); } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + int rolloverAliasesSize = in.readVInt(); + for (int i = 0; i < rolloverAliasesSize; i++) { + builder.putRolloverInfo(new RolloverInfo(in)); + } + } return builder.build(); } @@ -769,6 +800,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(cursor.key); DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out); } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeVInt(rolloverInfos.size()); + for (ObjectCursor cursor : rolloverInfos.values()) { + cursor.value.writeTo(out); + } + } } public static Builder builder(String index) { @@ -790,6 +827,7 @@ public static class Builder { private final ImmutableOpenMap.Builder aliases; private final ImmutableOpenMap.Builder customs; private final ImmutableOpenIntMap.Builder> inSyncAllocationIds; + private final ImmutableOpenMap.Builder rolloverInfos; private Integer routingNumShards; public Builder(String index) { @@ -798,6 +836,7 @@ public Builder(String index) { this.aliases = ImmutableOpenMap.builder(); this.customs = ImmutableOpenMap.builder(); this.inSyncAllocationIds = ImmutableOpenIntMap.builder(); + this.rolloverInfos = ImmutableOpenMap.builder(); } public Builder(IndexMetaData indexMetaData) { @@ -811,6 +850,7 @@ public Builder(IndexMetaData indexMetaData) { this.customs = ImmutableOpenMap.builder(indexMetaData.customs); this.routingNumShards = indexMetaData.routingNumShards; this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds); + this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos); } public String index() { @@ -951,6 +991,15 @@ public 
Builder putInSyncAllocationIds(int shardId, Set allocationIds) { return this; } + public RolloverInfo getRolloverInfo(String alias) { + return rolloverInfos.get(alias); + } + + public Builder putRolloverInfo(RolloverInfo rolloverInfo) { + rolloverInfos.put(rolloverInfo.getAlias(), rolloverInfo); + return this; + } + public long version() { return this.version; } @@ -1089,7 +1138,7 @@ public IndexMetaData build() { return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards); + indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { @@ -1143,6 +1192,12 @@ public static void toXContent(IndexMetaData indexMetaData, XContentBuilder build } builder.endObject(); + builder.startObject(KEY_ROLLOVER_INFOS); + for (ObjectCursor cursor : indexMetaData.getRolloverInfos().values()) { + cursor.value.toXContent(builder, params); + } + builder.endObject(); + builder.endObject(); } @@ -1202,6 +1257,16 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti throw new IllegalArgumentException("Unexpected token: " + token); } } + } else if (KEY_ROLLOVER_INFOS.equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + builder.putRolloverInfo(RolloverInfo.parse(parser, currentFieldName)); + } else { + throw new IllegalArgumentException("Unexpected token: " + token); + } + } } else if ("warmers".equals(currentFieldName)) { // TODO: do this in 6.0: // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 5657873987e18..2bfe0d0a58f70 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -1039,7 +1039,22 @@ public MetaData build() { } - // build all indices map + SortedMap aliasAndIndexLookup = Collections.unmodifiableSortedMap(buildAliasAndIndexLookup()); + + + // build all concrete indices arrays: + // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices. + // When doing an operation across all indices, most of the time is spent on actually going to all shards and + // do the required operations, the bottleneck isn't resolving expressions into concrete indices. 
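+ // note: buildAliasAndIndexLookup() below also computes and validates the write index of each alias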
+ String[] allIndicesArray = allIndices.toArray(new String[allIndices.size()]); + String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]); + String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]); + + return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), + customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup); + } + + private SortedMap buildAliasAndIndexLookup() { SortedMap aliasAndIndexLookup = new TreeMap<>(); for (ObjectCursor cursor : indices.values()) { IndexMetaData indexMetaData = cursor.value; @@ -1059,17 +1074,9 @@ public MetaData build() { }); } } - aliasAndIndexLookup = Collections.unmodifiableSortedMap(aliasAndIndexLookup); - // build all concrete indices arrays: - // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices. - // When doing an operation across all indices, most of the time is spent on actually going to all shards and - // do the required operations, the bottleneck isn't resolving expressions into concrete indices. - String[] allIndicesArray = allIndices.toArray(new String[allIndices.size()]); - String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]); - String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]); - - return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), - customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup); + aliasAndIndexLookup.values().stream().filter(AliasOrIndex::isAlias) + .forEach(alias -> ((AliasOrIndex.Alias) alias).computeAndValidateWriteIndex()); + return aliasAndIndexLookup; } public static String toXContent(MetaData metaData) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 0d8a374e66d42..be9db5262b00c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -516,7 +516,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } for (Alias alias : request.aliases()) { AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()) - .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); + .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).writeIndex(alias.writeIndex()).build(); indexMetaDataBuilder.putAlias(aliasMetaData); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java index 0c38371bdc9cb..28dc7f2425d91 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesService.java @@ -127,7 +127,7 @@ ClusterState innerExecute(ClusterState currentState, Iterable actio if (index == null) { throw new IndexNotFoundException(action.getIndex()); } - NewAliasValidator newAliasValidator = (alias, indexRouting, filter) -> { + NewAliasValidator newAliasValidator = (alias, indexRouting, 
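/* the validator lambda now also receives the write-index flag */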
filter, writeIndex) -> { /* It is important that we look up the index using the metadata builder we are modifying so we can remove an * index and replace it with an alias. */ Function indexLookup = name -> metadata.get(name); diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 70d26770a7bdc..cd8141ffa3c91 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -116,7 +116,7 @@ public NetworkModule(Settings settings, boolean transportClient, List> httpTransportFactory = plugin.getHttpTransports(settings, threadPool, bigArrays, - circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher); + pageCacheRecycler, circuitBreakerService, xContentRegistry, networkService, dispatcher); if (transportClient == false) { for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index c75754bde5855..4fad4159f55d8 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -21,6 +21,7 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.network.NetworkService; @@ -29,7 +30,9 @@ import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -48,11 +51,14 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; -public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements org.elasticsearch.http.HttpServerTransport { +public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { + public final HttpHandlingSettings handlingSettings; protected final NetworkService networkService; + protected final BigArrays bigArrays; protected final ThreadPool threadPool; protected final Dispatcher dispatcher; + private final NamedXContentRegistry xContentRegistry; protected final String[] bindHosts; protected final String[] publishHosts; @@ -61,11 +67,15 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo protected volatile BoundTransportAddress boundAddress; - protected AbstractHttpServerTransport(Settings settings, NetworkService networkService, ThreadPool threadPool, Dispatcher dispatcher) { + protected AbstractHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, 
Dispatcher dispatcher) { super(settings); this.networkService = networkService; + this.bigArrays = bigArrays; this.threadPool = threadPool; + this.xContentRegistry = xContentRegistry; this.dispatcher = dispatcher; + this.handlingSettings = HttpHandlingSettings.fromSettings(settings); // we can't make the network.bind_host a fallback since we already fall back to http.host hence the extra conditional here List httpBindHost = SETTING_HTTP_BIND_HOST.get(settings); @@ -156,17 +166,94 @@ static int resolvePublishPort(Settings settings, List boundAdd return publishPort; } - public void dispatchRequest(final RestRequest request, final RestChannel channel) { + /** + * This method handles an incoming http request. + * + * @param httpRequest that is incoming + * @param httpChannel that received the http request + */ + public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { + handleIncomingRequest(httpRequest, httpChannel, null); + } + + /** + * This method handles an incoming http request that has encountered an error. + * + * @param httpRequest that is incoming + * @param httpChannel that received the http request + * @param exception that was encountered + */ + public void incomingRequestError(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { + handleIncomingRequest(httpRequest, httpChannel, exception); + } + + // Visible for testing + void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) { final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchRequest(request, channel, threadContext); + if (badRequestCause != null) { + dispatcher.dispatchBadRequest(restRequest, channel, threadContext, badRequestCause); + } else { + dispatcher.dispatchRequest(restRequest, channel, threadContext); + } } } - public void dispatchBadRequest(final RestRequest request, final RestChannel channel, final Throwable cause) { - final ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchBadRequest(request, channel, threadContext, cause); + private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { + Exception badRequestCause = exception; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. 
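* For example, an unparsable Content-Type header is retried via removeHeader("Content-Type"); if that
* retry then trips on bad parameters as well, the second failure is attached as a suppressed exception,
* so neither cause is lost (see requestWithoutContentTypeHeader below).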
+ */ + final RestRequest restRequest; + { + RestRequest innerRestRequest; + try { + innerRestRequest = RestRequest.request(xContentRegistry, httpRequest, httpChannel); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); + innerRestRequest = requestWithoutContentTypeHeader(httpRequest, httpChannel, badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); + innerRestRequest = RestRequest.requestWithoutParameters(xContentRegistry, httpRequest, httpChannel); + } + restRequest = innerRestRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. + */ + final RestChannel channel; + { + RestChannel innerChannel; + ThreadContext threadContext = threadPool.getThreadContext(); + try { + innerChannel = new DefaultRestChannel(httpChannel, httpRequest, restRequest, bigArrays, handlingSettings, threadContext); + } catch (final IllegalArgumentException e) { + badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); + final RestRequest innerRequest = RestRequest.requestWithoutParameters(xContentRegistry, httpRequest, httpChannel); + innerChannel = new DefaultRestChannel(httpChannel, httpRequest, innerRequest, bigArrays, handlingSettings, threadContext); + } + channel = innerChannel; + } + + dispatchRequest(restRequest, channel, badRequestCause); + } + + private RestRequest requestWithoutContentTypeHeader(HttpRequest httpRequest, HttpChannel httpChannel, Exception badRequestCause) { + HttpRequest httpRequestWithoutContentType = httpRequest.removeHeader("Content-Type"); + try { + return RestRequest.request(xContentRegistry, httpRequestWithoutContentType, httpChannel); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return RestRequest.requestWithoutParameters(xContentRegistry, httpRequestWithoutContentType, httpChannel); } } } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java new file mode 100644 index 0000000000000..f5924bb239eae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -0,0 +1,172 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * The default rest channel for incoming requests. This class implements the basic logic for sending a rest + * response. It will set necessary headers and ensure that bytes are released after the response is sent. + */ +public class DefaultRestChannel extends AbstractRestChannel implements RestChannel { + + static final String CLOSE = "close"; + static final String CONNECTION = "connection"; + static final String KEEP_ALIVE = "keep-alive"; + static final String CONTENT_TYPE = "content-type"; + static final String CONTENT_LENGTH = "content-length"; + static final String SET_COOKIE = "set-cookie"; + static final String X_OPAQUE_ID = "X-Opaque-Id"; + + private final HttpRequest httpRequest; + private final BigArrays bigArrays; + private final HttpHandlingSettings settings; + private final ThreadContext threadContext; + private final HttpChannel httpChannel; + + DefaultRestChannel(HttpChannel httpChannel, HttpRequest httpRequest, RestRequest request, BigArrays bigArrays, + HttpHandlingSettings settings, ThreadContext threadContext) { + super(request, settings.getDetailedErrorsEnabled()); + this.httpChannel = httpChannel; + this.httpRequest = httpRequest; + this.bigArrays = bigArrays; + this.settings = settings; + this.threadContext = threadContext; + } + + @Override + protected BytesStreamOutput newBytesOutput() { + return new ReleasableBytesStreamOutput(bigArrays); + } + + @Override + public void sendResponse(RestResponse restResponse) { + HttpResponse httpResponse; + if (RestRequest.Method.HEAD == request.method()) { + httpResponse = httpRequest.createResponse(restResponse.status(), BytesArray.EMPTY); + } else { + httpResponse = httpRequest.createResponse(restResponse.status(), restResponse.content()); + } + + // TODO: Ideally we should move the setting of Cors headers into :server + // NioCorsHandler.setCorsResponseHeaders(nettyRequest, resp, corsConfig); + + String opaque = request.header(X_OPAQUE_ID); + if (opaque != null) { + setHeaderField(httpResponse, X_OPAQUE_ID, opaque); + } + + // Add all custom headers + addCustomHeaders(httpResponse, restResponse.getHeaders()); + addCustomHeaders(httpResponse, threadContext.getResponseHeaders()); + + ArrayList toClose = new ArrayList<>(3); + + boolean success = false; + try { + // If our response doesn't specify a content-type header, set one + setHeaderField(httpResponse, CONTENT_TYPE, restResponse.contentType(), false); + // If our response has no content-length, calculate and set one + setHeaderField(httpResponse, CONTENT_LENGTH, String.valueOf(restResponse.content().length()), false); + + addCookies(httpResponse); + + BytesReference content = restResponse.content(); + if (content instanceof Releasable) { + toClose.add((Releasable)
content); + } + BytesStreamOutput bytesStreamOutput = bytesOutputOrNull(); + if (bytesStreamOutput instanceof ReleasableBytesStreamOutput) { + toClose.add((Releasable) bytesStreamOutput); + } + + if (isCloseConnection()) { + toClose.add(httpChannel); + } + + ActionListener listener = ActionListener.wrap(() -> Releasables.close(toClose)); + httpChannel.sendResponse(httpResponse, listener); + success = true; + } finally { + if (success == false) { + Releasables.close(toClose); + } + } + + } + + private void setHeaderField(HttpResponse response, String headerField, String value) { + setHeaderField(response, headerField, value, true); + } + + private void setHeaderField(HttpResponse response, String headerField, String value, boolean override) { + if (override || !response.containsHeader(headerField)) { + response.addHeader(headerField, value); + } + } + + private void addCustomHeaders(HttpResponse response, Map> customHeaders) { + if (customHeaders != null) { + for (Map.Entry> headerEntry : customHeaders.entrySet()) { + for (String headerValue : headerEntry.getValue()) { + setHeaderField(response, headerEntry.getKey(), headerValue); + } + } + } + } + + private void addCookies(HttpResponse response) { + if (settings.isResetCookies()) { + List cookies = request.getHttpRequest().strictCookies(); + if (cookies.isEmpty() == false) { + for (String cookie : cookies) { + response.addHeader(SET_COOKIE, cookie); + } + } + } + } + + // Determine if the request connection should be closed on completion. + private boolean isCloseConnection() { + final boolean http10 = isHttp10(); + return CLOSE.equalsIgnoreCase(request.header(CONNECTION)) || (http10 && !KEEP_ALIVE.equalsIgnoreCase(request.header(CONNECTION))); + } + + // Determine if the request protocol version is HTTP 1.0 + private boolean isHttp10() { + return request.getHttpRequest().protocolVersion() == HttpRequest.HttpVersion.HTTP_1_0; + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpChannel.java b/server/src/main/java/org/elasticsearch/http/HttpChannel.java new file mode 100644 index 0000000000000..baea3e0c3b3c3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpChannel.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.lease.Releasable; + +import java.net.InetSocketAddress; + +public interface HttpChannel extends Releasable { + + /** + * Sends a http response to the channel. The listener will be executed once the send process has been + * completed. 
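* <p>
* A caller typically releases response resources only once the send has completed, e.g. (illustrative
* sketch mirroring DefaultRestChannel above):
* <pre>
* httpChannel.sendResponse(httpResponse, ActionListener.wrap(() -> Releasables.close(resources)));
* </pre>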
+ * + * @param response to send to channel + * @param listener to execute upon send completion + */ + void sendResponse(HttpResponse response, ActionListener listener); + + /** + * Returns the local address for this channel. + * + * @return the local address of this channel. + */ + InetSocketAddress getLocalAddress(); + + /** + * Returns the remote address for this channel. Can be null if channel does not have a remote address. + * + * @return the remote address of this channel. + */ + InetSocketAddress getRemoteAddress(); + + /** + * Closes the channel. This might be an asynchronous process. There is no guarantee that the channel + * will be closed when this method returns. + */ + void close(); + +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java index 7db8666e73ae3..ae1520cba6002 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java @@ -18,20 +18,17 @@ */ package org.elasticsearch.http; -public class HttpPipelinedMessage implements Comparable { +public interface HttpPipelinedMessage extends Comparable { - private final int sequence; - - public HttpPipelinedMessage(int sequence) { - this.sequence = sequence; - } - - public int getSequence() { - return sequence; - } + /** + * Get the sequence number for this message. + * + * @return the sequence number + */ + int getSequence(); @Override - public int compareTo(HttpPipelinedMessage o) { - return Integer.compare(sequence, o.sequence); + default int compareTo(HttpPipelinedMessage o) { + return Integer.compare(getSequence(), o.getSequence()); } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java index df8bd7ee1eb80..db3a2bae16714 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java @@ -18,15 +18,21 @@ */ package org.elasticsearch.http; -public class HttpPipelinedRequest extends HttpPipelinedMessage { +public class HttpPipelinedRequest implements HttpPipelinedMessage { private final R request; + private final int sequence; HttpPipelinedRequest(int sequence, R request) { - super(sequence); + this.sequence = sequence; this.request = request; } + @Override + public int getSequence() { + return sequence; + } + public R getRequest() { return request; } diff --git a/server/src/main/java/org/elasticsearch/http/HttpRequest.java b/server/src/main/java/org/elasticsearch/http/HttpRequest.java new file mode 100644 index 0000000000000..496fec23312b0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; + +import java.util.List; +import java.util.Map; + +/** + * A basic http request abstraction. Http modules need to implement this interface to integrate with the + * server package's rest handling. + */ +public interface HttpRequest { + + enum HttpVersion { + HTTP_1_0, + HTTP_1_1 + } + + RestRequest.Method method(); + + /** + * The uri of the rest request, with the query string. + */ + String uri(); + + BytesReference content(); + + /** + * Get all of the headers and values associated with the headers. Modifications of this map are not supported. + */ + Map> getHeaders(); + + List strictCookies(); + + HttpVersion protocolVersion(); + + HttpRequest removeHeader(String header); + + /** + * Create an http response from this request and the supplied status and content. + */ + HttpResponse createResponse(RestStatus status, BytesReference content); + +} diff --git a/qa/smoke-test-rank-eval-with-mustache/build.gradle b/server/src/main/java/org/elasticsearch/http/HttpResponse.java similarity index 71% rename from qa/smoke-test-rank-eval-with-mustache/build.gradle rename to server/src/main/java/org/elasticsearch/http/HttpResponse.java index d88ee71916141..2d363f663c3ef 100644 --- a/qa/smoke-test-rank-eval-with-mustache/build.gradle +++ b/server/src/main/java/org/elasticsearch/http/HttpResponse.java @@ -17,11 +17,16 @@ * under the License. */ -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-test' +package org.elasticsearch.http; +/** + * A basic http response abstraction. Http modules must implement this interface as the server package rest + * handling needs to set http headers for a response.
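* <p>
* Implementations only need simple header bookkeeping; the rest layer probes a header before writing a
* default, e.g. (illustrative sketch):
* <pre>
* if (response.containsHeader("content-length") == false) {
*     response.addHeader("content-length", String.valueOf(content.length()));
* }
* </pre>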
+ */ +public interface HttpResponse { + + void addHeader(String name, String value); + + boolean containsHeader(String name); -dependencies { - testCompile project(path: ':modules:rank-eval', configuration: 'runtime') - testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 11024286b2274..2d41491e3a746 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -24,9 +24,12 @@ import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.resync.TransportResyncReplicationAction; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.BinaryFieldMapper; @@ -62,6 +65,7 @@ import org.elasticsearch.plugins.MapperPlugin; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; @@ -86,15 +90,26 @@ public IndicesModule(List mapperPlugins) { } private void registerBuiltinWritables() { - namedWritables.add(new Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new)); - namedWritables.add(new Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new)); - namedWritables.add(new Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new)); + namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new)); + namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new)); + namedWritables.add(new NamedWriteableRegistry.Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new)); } - public List getNamedWriteables() { + public List getNamedWriteables() { return namedWritables; } + public List getNamedXContents() { + return Arrays.asList( + new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxAgeCondition.NAME), (p, c) -> + MaxAgeCondition.fromXContent(p)), + new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxDocsCondition.NAME), (p, c) -> + MaxDocsCondition.fromXContent(p)), + new NamedXContentRegistry.Entry(Condition.class, new ParseField(MaxSizeCondition.NAME), (p, c) -> + MaxSizeCondition.fromXContent(p)) + ); + } + private Map getMappers(List mapperPlugins) { Map mappers = new LinkedHashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index b6274223d2260..aa9b3943e8863 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -389,6 +389,7 @@ protected Node(final Environment environment, Collection final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of( 
NetworkModule.getNamedXContents().stream(), + indicesModule.getNamedXContents().stream(), searchModule.getNamedXContents().stream(), pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.getNamedXContent().stream()), diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index d4d299b7e4af1..54dcffab6e366 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; @@ -77,8 +76,9 @@ public Status getStatus() { *
<p>
* This doesn't affect the status of this allocated task. */ - public void updatePersistentStatus(Task.Status status, ActionListener> listener) { - persistentTasksService.updateStatus(persistentTaskId, allocationId, status, listener); + public void updatePersistentTaskState(final PersistentTaskState state, + final ActionListener> listener) { + persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, listener); } public String getPersistentTaskId() { @@ -116,7 +116,7 @@ public void waitForPersistentTask(final Predicate void executeTask(Params params, - @Nullable Task.Status status, - AllocatedPersistentTask task, - PersistentTasksExecutor executor) { + public void executeTask(final Params params, + final @Nullable PersistentTaskState state, + final AllocatedPersistentTask task, + final PersistentTasksExecutor executor) { threadPool.executor(executor.getExecutor()).execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -49,14 +49,12 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { try { - executor.nodeOperation(task, params, status); + executor.nodeOperation(task, params, state); } catch (Exception ex) { task.markAsFailed(ex); } } }); - } - } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java new file mode 100644 index 0000000000000..57c913f51bb88 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskState.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.xcontent.ToXContentObject; + +/** + * {@link PersistentTaskState} represents the state of the persistent tasks, as it + * is persisted in the cluster state. 
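* <p>
* A minimal implementation (class and task names here are hypothetical) names itself after its task and
* round-trips through the wire and x-content formats:
* <pre>
* public final class MyTaskState implements PersistentTaskState {
*     @Override public String getWriteableName() { return "my_task"; } // must match the task name
*     @Override public void writeTo(StreamOutput out) throws IOException { }
*     @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
*         return builder.startObject().endObject();
*     }
* }
* </pre>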
+ */ +public interface PersistentTaskState extends ToXContentObject, NamedWriteable { +} diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 1464279a814d5..9ed0af010b530 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -35,7 +35,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.decider.AssignmentDecision; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.tasks.Task; import java.util.Objects; @@ -178,27 +177,30 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } /** - * Update task status + * Update the state of a persistent task * - * @param id the id of a persistent task - * @param allocationId the expected allocation id of the persistent task - * @param status new status - * @param listener the listener that will be called when task is removed + * @param taskId the id of a persistent task + * @param taskAllocationId the expected allocation id of the persistent task + * @param taskState new state + * @param listener the listener that will be called when task is removed */ - public void updatePersistentTaskStatus(String id, long allocationId, Task.Status status, ActionListener> listener) { - clusterService.submitStateUpdateTask("update task status", new ClusterStateUpdateTask() { + public void updatePersistentTaskState(final String taskId, + final long taskAllocationId, + final PersistentTaskState taskState, + final ActionListener> listener) { + clusterService.submitStateUpdateTask("update task state", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState); - if (tasksInProgress.hasTask(id, allocationId)) { - return update(currentState, tasksInProgress.updateTaskStatus(id, status)); + if (tasksInProgress.hasTask(taskId, taskAllocationId)) { + return update(currentState, tasksInProgress.updateTaskState(taskId, taskState)); } else { - if (tasksInProgress.hasTask(id)) { - logger.warn("trying to update status on task {} with unexpected allocation id {}", id, allocationId); + if (tasksInProgress.hasTask(taskId)) { + logger.warn("trying to update state on task {} with unexpected allocation id {}", taskId, taskAllocationId); } else { - logger.warn("trying to update status on non-existing task {}", id); + logger.warn("trying to update state on non-existing task {}", taskId); } - throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", id, allocationId); + throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", taskId, taskAllocationId); } } @@ -209,7 +211,7 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, id)); + listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, taskId)); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index 
09346704a801d..f81b7c770e56c 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -38,8 +38,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.Task.Status; import java.io.IOException; import java.util.Collection; @@ -61,13 +59,12 @@ * A cluster state record that contains a list of all running persistent tasks */ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable implements MetaData.Custom { - public static final String TYPE = "persistent_tasks"; + public static final String TYPE = "persistent_tasks"; private static final String API_CONTEXT = MetaData.XContentContext.API.toString(); // TODO: Implement custom Diff for tasks private final Map> tasks; - private final long lastAllocationId; public PersistentTasksCustomMetaData(long lastAllocationId, Map> tasks) { @@ -94,8 +91,8 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map, String> parser = new ObjectParser<>("named"); parser.declareObject(TaskDescriptionBuilder::setParams, (p, c) -> p.namedObject(PersistentTaskParams.class, c, null), new ParseField("params")); - parser.declareObject(TaskDescriptionBuilder::setStatus, - (p, c) -> p.namedObject(Status.class, c, null), new ParseField("status")); + parser.declareObject(TaskDescriptionBuilder::setState, + (p, c) -> p.namedObject(PersistentTaskState.class, c, null), new ParseField("state", "status")); TASK_DESCRIPTION_PARSER = (XContentParser p, Void c, String name) -> parser.parse(p, new TaskDescriptionBuilder<>(name), name); // Assignment parser @@ -115,7 +112,7 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map builder = objects.get(0); taskBuilder.setTaskName(builder.taskName); taskBuilder.setParams(builder.params); - taskBuilder.setStatus(builder.status); + taskBuilder.setState(builder.state); }, TASK_DESCRIPTION_PARSER, new ParseField("task")); PERSISTENT_TASK_PARSER.declareObject(TaskBuilder::setAssignment, ASSIGNMENT_PARSER, new ParseField("assignment")); PERSISTENT_TASK_PARSER.declareLong(TaskBuilder::setAllocationIdOnLastStatusUpdate, @@ -123,12 +120,13 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map { + private final String taskName; private Params params; - private Status status; + private PersistentTaskState state; private TaskDescriptionBuilder(String taskName) { this.taskName = taskName; @@ -139,8 +137,8 @@ private TaskDescriptionBuilder setParams(Params params) { return this; } - private TaskDescriptionBuilder setStatus(Status status) { - this.status = status; + private TaskDescriptionBuilder setState(PersistentTaskState state) { + this.state = state; return this; } } @@ -261,37 +259,34 @@ public String toString() { * A record that represents a single running persistent task */ public static class PersistentTask
<P extends PersistentTaskParams>
implements Writeable, ToXContentObject { + private final String id; private final long allocationId; private final String taskName; private final P params; - @Nullable - private final Status status; + private final @Nullable PersistentTaskState state; private final Assignment assignment; - @Nullable - private final Long allocationIdOnLastStatusUpdate; + private final @Nullable Long allocationIdOnLastStatusUpdate; - public PersistentTask(String id, String taskName, P params, long allocationId, Assignment assignment) { - this(id, allocationId, taskName, params, null, assignment, null); + public PersistentTask(final String id, final String name, final P params, final long allocationId, final Assignment assignment) { + this(id, allocationId, name, params, null, assignment, null); } - public PersistentTask(PersistentTask
<P>
task, long allocationId, Assignment assignment) { - this(task.id, allocationId, task.taskName, task.params, task.status, - assignment, task.allocationId); + public PersistentTask(final PersistentTask
<P>
task, final long allocationId, final Assignment assignment) { + this(task.id, allocationId, task.taskName, task.params, task.state, assignment, task.allocationId); } - public PersistentTask(PersistentTask
<P>
task, Status status) { - this(task.id, task.allocationId, task.taskName, task.params, status, - task.assignment, task.allocationId); + public PersistentTask(final PersistentTask
<P>
task, final PersistentTaskState state) { + this(task.id, task.allocationId, task.taskName, task.params, state, task.assignment, task.allocationId); } - private PersistentTask(String id, long allocationId, String taskName, P params, - Status status, Assignment assignment, Long allocationIdOnLastStatusUpdate) { + private PersistentTask(final String id, final long allocationId, final String name, final P params, + final PersistentTaskState state, final Assignment assignment, final Long allocationIdOnLastStatusUpdate) { this.id = id; this.allocationId = allocationId; - this.taskName = taskName; + this.taskName = name; this.params = params; - this.status = status; + this.state = state; this.assignment = assignment; this.allocationIdOnLastStatusUpdate = allocationIdOnLastStatusUpdate; if (params != null) { @@ -300,10 +295,10 @@ private PersistentTask(String id, long allocationId, String taskName, P params, params.getWriteableName() + " task: " + taskName); } } - if (status != null) { - if (status.getWriteableName().equals(taskName) == false) { + if (state != null) { + if (state.getWriteableName().equals(taskName) == false) { throw new IllegalArgumentException("status has to have the same writeable name as task. status: " + - status.getWriteableName() + " task: " + taskName); + state.getWriteableName() + " task: " + taskName); } } } @@ -318,7 +313,7 @@ public PersistentTask(StreamInput in) throws IOException { } else { params = (P) in.readOptionalNamedWriteable(PersistentTaskParams.class); } - status = in.readOptionalNamedWriteable(Task.Status.class); + state = in.readOptionalNamedWriteable(PersistentTaskState.class); assignment = new Assignment(in.readOptionalString(), in.readString()); allocationIdOnLastStatusUpdate = in.readOptionalLong(); } @@ -333,7 +328,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeOptionalNamedWriteable(params); } - out.writeOptionalNamedWriteable(status); + out.writeOptionalNamedWriteable(state); out.writeOptionalString(assignment.executorNode); out.writeString(assignment.explanation); out.writeOptionalLong(allocationIdOnLastStatusUpdate); @@ -348,15 +343,14 @@ public boolean equals(Object o) { allocationId == that.allocationId && Objects.equals(taskName, that.taskName) && Objects.equals(params, that.params) && - Objects.equals(status, that.status) && + Objects.equals(state, that.state) && Objects.equals(assignment, that.assignment) && Objects.equals(allocationIdOnLastStatusUpdate, that.allocationIdOnLastStatusUpdate); } @Override public int hashCode() { - return Objects.hash(id, allocationId, taskName, params, status, assignment, - allocationIdOnLastStatusUpdate); + return Objects.hash(id, allocationId, taskName, params, state, assignment, allocationIdOnLastStatusUpdate); } @Override @@ -395,8 +389,8 @@ public boolean isAssigned() { } @Nullable - public Status getStatus() { - return status; + public PersistentTaskState getState() { + return state; } @Override @@ -411,8 +405,8 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params xPa if (params != null) { builder.field("params", params, xParams); } - if (status != null) { - builder.field("status", status, xParams); + if (state != null) { + builder.field("state", state, xParams); } } builder.endObject(); @@ -448,7 +442,7 @@ private static class TaskBuilder { private long allocationId; private String taskName; private Params params; - private Status status; + private PersistentTaskState state; private Assignment assignment = INITIAL_ASSIGNMENT; private Long 
allocationIdOnLastStatusUpdate; @@ -472,8 +466,8 @@ public TaskBuilder setParams(Params params) { return this; } - public TaskBuilder setStatus(Status status) { - this.status = status; + public TaskBuilder setState(PersistentTaskState state) { + this.state = state; return this; } @@ -489,8 +483,7 @@ public TaskBuilder setAllocationIdOnLastStatusUpdate(Long allocationIdOn } public PersistentTask build() { - return new PersistentTask<>(id, allocationId, taskName, params, status, - assignment, allocationIdOnLastStatusUpdate); + return new PersistentTask<>(id, allocationId, taskName, params, state, assignment, allocationIdOnLastStatusUpdate); } } @@ -608,13 +601,13 @@ public Builder reassignTask(String taskId, Assignment assignment) { } /** - * Updates the task status + * Updates the task state */ - public Builder updateTaskStatus(String taskId, Status status) { + public Builder updateTaskState(final String taskId, final PersistentTaskState taskState) { PersistentTask taskInProgress = tasks.get(taskId); if (taskInProgress != null) { changed = true; - tasks.put(taskId, new PersistentTask<>(taskInProgress, status)); + tasks.put(taskId, new PersistentTask<>(taskInProgress, taskState)); } else { throw new ResourceNotFoundException("cannot update task with id {" + taskId + "}, the task no longer exists"); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java index de75b1ff54085..758ffbe69a04d 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import java.util.Map; @@ -118,7 +117,7 @@ protected String getDescription(PersistentTask taskInProgress) { * NOTE: The nodeOperation has to throw an exception, trigger task.markAsCompleted() or task.completeAndNotifyIfNeeded() methods to * indicate that the persistent task has finished. */ - protected abstract void nodeOperation(AllocatedPersistentTask task, Params params, @Nullable Task.Status status); + protected abstract void nodeOperation(AllocatedPersistentTask task, Params params, @Nullable PersistentTaskState state); public String getExecutor() { return executor; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index 724e10c2c9030..91cdb400aa0d4 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -50,13 +50,13 @@ * non-transport client nodes in the cluster and monitors cluster state changes to detect started commands. 
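* <p>
* When a started task is detected, the executor receives both the task params and the last state that
* was persisted in the cluster state, e.g. (illustrative sketch; MyTaskState is hypothetical):
* <pre>
* protected void nodeOperation(AllocatedPersistentTask task, Params params, @Nullable PersistentTaskState state) {
*     MyTaskState checkpoint = (MyTaskState) state; // null the first time the task runs
*     // resume work from the checkpoint ...
* }
* </pre>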
*/ public class PersistentTasksNodeService extends AbstractComponent implements ClusterStateListener { + private final Map runningTasks = new HashMap<>(); private final PersistentTasksService persistentTasksService; private final PersistentTasksExecutorRegistry persistentTasksExecutorRegistry; private final TaskManager taskManager; private final NodePersistentTasksExecutor nodePersistentTasksExecutor; - public PersistentTasksNodeService(Settings settings, PersistentTasksService persistentTasksService, PersistentTasksExecutorRegistry persistentTasksExecutorRegistry, @@ -172,7 +172,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, task.getPersistentTaskId(), task.getAllocationId()); try { runningTasks.put(taskInProgress.getAllocationId(), task); - nodePersistentTasksExecutor.executeTask(taskInProgress.getParams(), taskInProgress.getStatus(), task, executor); + nodePersistentTasksExecutor.executeTask(taskInProgress.getParams(), taskInProgress.getState(), task, executor); } catch (Exception e) { // Submit task failure task.markAsFailed(e); @@ -215,8 +215,8 @@ public void onFailure(Exception e) { } } - public static class Status implements Task.Status { + public static final String NAME = "persistent_executor"; private final AllocatedPersistentTask.State state; @@ -252,10 +252,6 @@ public String toString() { return Strings.toString(this); } - public AllocatedPersistentTask.State getState() { - return state; - } - @Override public boolean isFragment() { return false; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 01c28dd5cd634..d0c791e3df046 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -113,13 +112,14 @@ void sendCancelRequest(final long taskId, final String reason, final ActionListe * Notifies the master node that the state of a persistent task has changed. *
<p>
* Persistent task implementers shouldn't call this method directly and use - * {@link AllocatedPersistentTask#updatePersistentStatus} instead + * {@link AllocatedPersistentTask#updatePersistentTaskState} instead */ - void updateStatus(final String taskId, - final long taskAllocationID, - final Task.Status status, - final ActionListener> listener) { - UpdatePersistentTaskStatusAction.Request request = new UpdatePersistentTaskStatusAction.Request(taskId, taskAllocationID, status); + void sendUpdateStateRequest(final String taskId, + final long taskAllocationID, + final PersistentTaskState taskState, + final ActionListener> listener) { + UpdatePersistentTaskStatusAction.Request request = + new UpdatePersistentTaskStatusAction.Request(taskId, taskAllocationID, taskState); execute(request, UpdatePersistentTaskStatusAction.INSTANCE, listener); } diff --git a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java index a898558fc2668..a639e4bde5360 100644 --- a/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/elasticsearch/persistent/UpdatePersistentTaskStatusAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -63,16 +62,15 @@ public static class Request extends MasterNodeRequest { private String taskId; private long allocationId = -1L; - private Task.Status status; + private PersistentTaskState state; public Request() { - } - public Request(String taskId, long allocationId, Task.Status status) { + public Request(String taskId, long allocationId, PersistentTaskState state) { this.taskId = taskId; this.allocationId = allocationId; - this.status = status; + this.state = state; } public void setTaskId(String taskId) { @@ -83,8 +81,8 @@ public void setAllocationId(long allocationId) { this.allocationId = allocationId; } - public void setStatus(Task.Status status) { - this.status = status; + public void setState(PersistentTaskState state) { + this.state = state; } @Override @@ -92,7 +90,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); taskId = in.readString(); allocationId = in.readLong(); - status = in.readOptionalNamedWriteable(Task.Status.class); + state = in.readOptionalNamedWriteable(PersistentTaskState.class); } @Override @@ -100,7 +98,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(taskId); out.writeLong(allocationId); - out.writeOptionalNamedWriteable(status); + out.writeOptionalNamedWriteable(state); } @Override @@ -122,13 +120,12 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Request request = (Request) o; - return Objects.equals(taskId, request.taskId) && allocationId == request.allocationId && - Objects.equals(status, request.status); + return Objects.equals(taskId, request.taskId) && allocationId == request.allocationId && Objects.equals(state, request.state); } @Override public int hashCode() { - return Objects.hash(taskId, allocationId, status); + return Objects.hash(taskId, allocationId, state); } } @@ -144,11 +141,10 @@ public final 
RequestBuilder setTaskId(String taskId) { return this; } - public final RequestBuilder setStatus(Task.Status status) { - request.setStatus(status); + public final RequestBuilder setState(PersistentTaskState state) { + request.setState(state); return this; } - } public static class TransportAction extends TransportMasterNodeAction { @@ -182,9 +178,10 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } @Override - protected final void masterOperation(final Request request, ClusterState state, + protected final void masterOperation(final Request request, + final ClusterState state, final ActionListener listener) { - persistentTasksClusterService.updatePersistentTaskStatus(request.taskId, request.allocationId, request.status, + persistentTasksClusterService.updatePersistentTaskState(request.taskId, request.allocationId, request.state, new ActionListener>() { @Override public void onResponse(PersistentTask task) { diff --git a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java index df41036ffeabb..d33997fc82b99 100644 --- a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java @@ -71,8 +71,8 @@ default Map> getTransports(Settings settings, Thread * See {@link org.elasticsearch.common.network.NetworkModule#HTTP_TYPE_SETTING} to configure a specific implementation. */ default Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index d376b65ef2d88..4e3d652ec5d7e 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -40,7 +40,7 @@ public abstract class AbstractRestChannel implements RestChannel { private static final Predicate EXCLUDE_FILTER = INCLUDE_FILTER.negate(); protected final RestRequest request; - protected final boolean detailedErrorsEnabled; + private final boolean detailedErrorsEnabled; private final String format; private final String filterPath; private final boolean pretty; diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index aae63f041fad5..82fcf7178d1dd 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -272,8 +272,9 @@ boolean dispatchRequest(final RestRequest request, final RestChannel channel, fi */ private static boolean hasContentType(final RestRequest restRequest, final RestHandler restHandler) { if (restRequest.getXContentType() == null) { - if (restHandler.supportsContentStream() && restRequest.header("Content-Type") != null) { - final String lowercaseMediaType = restRequest.header("Content-Type").toLowerCase(Locale.ROOT); + String contentTypeHeader = restRequest.header("Content-Type"); + if (restHandler.supportsContentStream() && contentTypeHeader != null) { + final String lowercaseMediaType = contentTypeHeader.toLowerCase(Locale.ROOT); // we also support 
newline delimited JSON: http://specs.okfnlabs.org/ndjson/ if (lowercaseMediaType.equals("application/x-ndjson")) { restRequest.setXContentType(XContentType.JSON); diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index 65b4f9d1d3614..813d6feb55167 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -35,10 +35,11 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpRequest; import java.io.IOException; import java.io.InputStream; -import java.net.SocketAddress; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -51,7 +52,7 @@ import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.elasticsearch.common.unit.TimeValue.parseTimeValue; -public abstract class RestRequest implements ToXContent.Params { +public class RestRequest implements ToXContent.Params { // tchar pattern as defined by RFC7230 section 3.2.6 private static final Pattern TCHAR_PATTERN = Pattern.compile("[a-zA-z0-9!#$%&'*+\\-.\\^_`|~]+"); @@ -62,18 +63,47 @@ public abstract class RestRequest implements ToXContent.Params { private final String rawPath; private final Set consumedParams = new HashSet<>(); private final SetOnce xContentType = new SetOnce<>(); + private final HttpRequest httpRequest; + private final HttpChannel httpChannel; + + protected RestRequest(NamedXContentRegistry xContentRegistry, Map params, String path, + Map> headers, HttpRequest httpRequest, HttpChannel httpChannel) { + final XContentType xContentType; + try { + xContentType = parseContentType(headers.get("Content-Type")); + } catch (final IllegalArgumentException e) { + throw new ContentTypeHeaderException(e); + } + if (xContentType != null) { + this.xContentType.set(xContentType); + } + this.xContentRegistry = xContentRegistry; + this.httpRequest = httpRequest; + this.httpChannel = httpChannel; + this.params = params; + this.rawPath = path; + this.headers = Collections.unmodifiableMap(headers); + } + + protected RestRequest(RestRequest restRequest) { + this(restRequest.getXContentRegistry(), restRequest.params(), restRequest.path(), restRequest.getHeaders(), + restRequest.getHttpRequest(), restRequest.getHttpChannel()); + } /** - * Creates a new REST request. + * Creates a new REST request. 
This method will throw {@link BadParameterException} if the path cannot be + * decoded * * @param xContentRegistry the content registry - * @param uri the raw URI that will be parsed into the path and the parameters - * @param headers a map of the header; this map should implement a case-insensitive lookup + * @param httpRequest the http request + * @param httpChannel the http channel * @throws BadParameterException if the parameters can not be decoded * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(final NamedXContentRegistry xContentRegistry, final String uri, final Map> headers) { - this(xContentRegistry, params(uri), path(uri), headers); + public static RestRequest request(NamedXContentRegistry xContentRegistry, HttpRequest httpRequest, HttpChannel httpChannel) { + Map params = params(httpRequest.uri()); + String path = path(httpRequest.uri()); + return new RestRequest(xContentRegistry, params, path, httpRequest.getHeaders(), httpRequest, httpChannel); } private static Map params(final String uri) { @@ -99,46 +129,34 @@ private static String path(final String uri) { } /** - * Creates a new REST request. In contrast to - * {@link RestRequest#RestRequest(NamedXContentRegistry, Map, String, Map)}, the path is not decoded so this constructor will not throw - * a {@link BadParameterException}. + * Creates a new REST request. The path is not decoded so this constructor will not throw a + * {@link BadParameterException}. * * @param xContentRegistry the content registry - * @param params the request parameters - * @param path the raw path (which is not parsed) - * @param headers a map of the header; this map should implement a case-insensitive lookup + * @param httpRequest the http request + * @param httpChannel the http channel * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest( - final NamedXContentRegistry xContentRegistry, - final Map params, - final String path, - final Map> headers) { - final XContentType xContentType; - try { - xContentType = parseContentType(headers.get("Content-Type")); - } catch (final IllegalArgumentException e) { - throw new ContentTypeHeaderException(e); - } - if (xContentType != null) { - this.xContentType.set(xContentType); - } - this.xContentRegistry = xContentRegistry; - this.params = params; - this.rawPath = path; - this.headers = Collections.unmodifiableMap(headers); + public static RestRequest requestWithoutParameters(NamedXContentRegistry xContentRegistry, HttpRequest httpRequest, + HttpChannel httpChannel) { + Map params = Collections.emptyMap(); + return new RestRequest(xContentRegistry, params, httpRequest.uri(), httpRequest.getHeaders(), httpRequest, httpChannel); } public enum Method { GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT } - public abstract Method method(); + public Method method() { + return httpRequest.method(); + } /** * The uri of the rest request, with the query string. */ - public abstract String uri(); + public String uri() { + return httpRequest.uri(); + } /** * The non decoded, raw path provided. 
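// Illustrative sketch (not part of this change) of the factory-based construction that replaces the
// old subclass constructors; xContentRegistry, httpRequest and httpChannel come from the owning transport:
//
//   RestRequest restRequest;
//   try {
//       restRequest = RestRequest.request(xContentRegistry, httpRequest, httpChannel);
//   } catch (RestRequest.BadParameterException e) {
//       // fall back to a request whose parameters are left undecoded
//       restRequest = RestRequest.requestWithoutParameters(xContentRegistry, httpRequest, httpChannel);
//   }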
@@ -154,9 +172,13 @@ public final String path() { return RestUtils.decodeComponent(rawPath()); } - public abstract boolean hasContent(); + public boolean hasContent() { + return content().length() > 0; + } - public abstract BytesReference content(); + public BytesReference content() { + return httpRequest.content(); + } /** * @return content of the request body or throw an exception if the body or content type is missing @@ -216,14 +238,12 @@ final void setXContentType(XContentType xContentType) { this.xContentType.set(xContentType); } - @Nullable - public SocketAddress getRemoteAddress() { - return null; + public HttpChannel getHttpChannel() { + return httpChannel; } - @Nullable - public SocketAddress getLocalAddress() { - return null; + public HttpRequest getHttpRequest() { + return httpRequest; } public final boolean hasParam(String key) { diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index 7e031f8d004e1..d0d6fa752d68e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -20,10 +20,10 @@ package org.elasticsearch.rest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -31,8 +31,7 @@ public abstract class RestResponse { - protected Map> customHeaders; - + private Map> customHeaders; /** * The response content type. @@ -81,10 +80,13 @@ public void addHeader(String name, String value) { } /** - * Returns custom headers that have been added, or null if none have been set. + * Returns custom headers that have been added. This method should not be used to mutate headers. */ - @Nullable public Map> getHeaders() { - return customHeaders; + if (customHeaders == null) { + return Collections.emptyMap(); + } else { + return customHeaders; + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java index 202530ccf9289..f2ae67e1fc1ab 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java @@ -114,6 +114,7 @@ public void testParseAdd() throws IOException { Map filter = randomBoolean() ? randomMap(5) : null; Object searchRouting = randomBoolean() ? randomRouting() : null; Object indexRouting = randomBoolean() ? randomBoolean() ? 
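Dropping the @Nullable contract on getHeaders() moves the null handling into RestResponse itself, so every consumer can iterate unconditionally. A before/after sketch of a typical caller (httpResponse.addHeader is the sink from the new HttpResponse interface):

// Before this change, callers had to guard against null:
Map<String, List<String>> headers = restResponse.getHeaders();
if (headers != null) {
    headers.forEach((name, values) -> values.forEach(value -> httpResponse.addHeader(name, value)));
}

// After, the guard disappears (an empty map comes back instead of null):
restResponse.getHeaders().forEach(
    (name, values) -> values.forEach(value -> httpResponse.addHeader(name, value)));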
searchRouting : randomRouting() : null; + boolean writeIndex = randomBoolean(); XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); b.startObject(); { @@ -142,6 +143,7 @@ public void testParseAdd() throws IOException { if (indexRouting != null && false == indexRouting.equals(searchRouting)) { b.field("index_routing", indexRouting); } + b.field("is_write_index", writeIndex); } b.endObject(); } @@ -159,6 +161,7 @@ public void testParseAdd() throws IOException { } assertEquals(Objects.toString(searchRouting, null), action.searchRouting()); assertEquals(Objects.toString(indexRouting, null), action.indexRouting()); + assertEquals(writeIndex, action.writeIndex()); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java index dbca9f7a98f13..e50805ab5b263 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestTests.java @@ -93,6 +93,7 @@ public void testToXContent() throws IOException { Alias alias = new Alias("test_alias"); alias.routing("1"); alias.filter("{\"term\":{\"year\":2016}}"); + alias.writeIndex(true); request.alias(alias); Settings.Builder settings = Settings.builder(); @@ -103,7 +104,7 @@ public void testToXContent() throws IOException { String expectedRequestBody = "{\"settings\":{\"index\":{\"number_of_shards\":\"10\"}}," + "\"mappings\":{\"my_type\":{\"type\":{}}}," + - "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\"}}}"; + "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\",\"is_write_index\":true}}}"; assertEquals(expectedRequestBody, actualRequestBody); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 869bba452fefe..aa35d9d273a92 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -37,6 +37,7 @@ import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Set; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -44,6 +45,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.collection.IsEmptyCollection.empty; +import static org.hamcrest.core.CombinableMatcher.both; +import static org.hamcrest.number.OrderingComparison.greaterThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class RolloverIT extends ESIntegTestCase { @@ -70,6 +75,7 @@ public void testRolloverOnEmptyIndex() throws Exception { } public void testRollover() throws Exception { + long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L; assertAcked(prepareCreate("test_index-2").addAlias(new Alias("test_alias")).get()); index("test_index-2", "type1", "1", "field", "value"); flush("test_index-2"); @@ -84,6 +90,11 @@ public void testRollover() throws Exception { assertFalse(oldIndex.getAliases().containsKey("test_alias")); final IndexMetaData newIndex = 
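The updated expected-body strings pin down where is_write_index sits in alias XContent: inside each alias object, alongside filter and routing. A sketch producing the same shape with XContentBuilder (the filter sub-object is omitted for brevity):

XContentBuilder b = JsonXContent.contentBuilder();
b.startObject();
b.startObject("aliases");
b.startObject("test_alias");
b.field("routing", "1");
b.field("is_write_index", true);
b.endObject();
b.endObject();
b.endObject();
// Strings.toString(b) -> {"aliases":{"test_alias":{"routing":"1","is_write_index":true}}}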
state.metaData().index("test_index-000003"); assertTrue(newIndex.getAliases().containsKey("test_alias")); + assertThat(oldIndex.getRolloverInfos().size(), equalTo(1)); + assertThat(oldIndex.getRolloverInfos().get("test_alias").getAlias(), equalTo("test_alias")); + assertThat(oldIndex.getRolloverInfos().get("test_alias").getMetConditions(), is(empty())); + assertThat(oldIndex.getRolloverInfos().get("test_alias").getTime(), + is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L)))); } public void testRolloverWithIndexSettings() throws Exception { @@ -246,17 +257,27 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("No rollover with a large max_size condition", response.isRolledOver(), equalTo(false)); + final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-1"); + assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } // A small max_size { + ByteSizeValue maxSizeValue = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES); + long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L; final RolloverResponse response = client().admin().indices() .prepareRolloverIndex("test_alias") - .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES)) + .addMaxIndexSizeCondition(maxSizeValue) .get(); assertThat(response.getOldIndex(), equalTo("test-1")); assertThat(response.getNewIndex(), equalTo("test-000002")); assertThat("Should rollover with a small max_size condition", response.isRolledOver(), equalTo(true)); + final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-1"); + List metConditions = oldIndex.getRolloverInfos().get("test_alias").getMetConditions(); + assertThat(metConditions.size(), equalTo(1)); + assertThat(metConditions.get(0).toString(), equalTo(new MaxSizeCondition(maxSizeValue).toString())); + assertThat(oldIndex.getRolloverInfos().get("test_alias").getTime(), + is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L)))); } // An empty index @@ -268,6 +289,8 @@ public void testRolloverMaxSize() throws Exception { assertThat(response.getOldIndex(), equalTo("test-000002")); assertThat(response.getNewIndex(), equalTo("test-000003")); assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); + final IndexMetaData oldIndex = client().admin().cluster().prepareState().get().getState().metaData().index("test-000002"); + assertThat(oldIndex.getRolloverInfos().size(), equalTo(0)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java index ba595de5215a3..4fa99374f0fab 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java @@ -71,6 +71,7 @@ public void testToXContent() throws IOException { Alias alias = new Alias("test_alias"); alias.routing("1"); alias.filter("{\"term\":{\"year\":2016}}"); + alias.writeIndex(true); target.alias(alias); Settings.Builder settings = Settings.builder(); settings.put(SETTING_NUMBER_OF_SHARDS, 10); @@ -78,7 +79,7 @@ public 
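The timestamp assertions here use a bracketing pattern worth naming: sample the clock before the rollover minus a one-second pad, sample it again after plus the same pad, and require the recorded RolloverInfo time to fall inside. The pad absorbs clock granularity between the test thread and the node that wrote the metadata. The pattern in isolation:

long before = client().threadPool().absoluteTimeInMillis() - 1000L;
// ... perform the rollover ...
long recorded = oldIndex.getRolloverInfos().get("test_alias").getTime();
long after = client().threadPool().absoluteTimeInMillis() + 1000L;
assertThat(recorded, is(both(greaterThanOrEqualTo(before)).and(lessThanOrEqualTo(after))));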
void testToXContent() throws IOException { request.setTargetIndex(target); String actualRequestBody = Strings.toString(request); String expectedRequestBody = "{\"settings\":{\"index\":{\"number_of_shards\":\"10\"}}," + - "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\"}}}"; + "\"aliases\":{\"test_alias\":{\"filter\":{\"term\":{\"year\":2016}},\"routing\":\"1\",\"is_write_index\":true}}}"; assertEquals(expectedRequestBody, actualRequestBody); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java index 00865cc9a6579..de23c560eb9af 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AliasMetaDataTests.java @@ -41,6 +41,7 @@ public void testSerialization() throws IOException { .indexRouting("indexRouting") .routing("routing") .searchRouting("trim,tw , ltw , lw") + .writeIndex(randomBoolean() ? null : randomBoolean()) .build(); assertThat(before.searchRoutingValues(), equalTo(Sets.newHashSet("trim", "tw ", " ltw ", " lw"))); @@ -54,6 +55,21 @@ public void testSerialization() throws IOException { assertThat(after, equalTo(before)); } + @Override + protected void assertEqualInstances(AliasMetaData expectedInstance, AliasMetaData newInstance) { + assertNotSame(newInstance, expectedInstance); + if (expectedInstance.writeIndex() == null) { + expectedInstance = AliasMetaData.builder(expectedInstance.alias()) + .filter(expectedInstance.filter()) + .indexRouting(expectedInstance.indexRouting()) + .searchRouting(expectedInstance.searchRouting()) + .writeIndex(randomBoolean() ? null : randomBoolean()) + .build(); + } + assertEquals(expectedInstance, newInstance); + assertEquals(expectedInstance.hashCode(), newInstance.hashCode()); + } + @Override protected AliasMetaData createTestInstance() { return createTestItem(); @@ -95,6 +111,7 @@ private static AliasMetaData createTestItem() { if (randomBoolean()) { builder.filter("{\"term\":{\"year\":2016}}"); } + builder.writeIndex(randomBoolean()); return builder.build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 56fbf1db24502..744a29e843c48 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -69,6 +69,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -291,6 +292,32 @@ public void testValidateWaitForActiveShardsFailure() throws Exception { assertThat(e.getMessage(), containsString("invalid wait_for_active_shards")); } + public void testWriteIndex() throws Exception { + Boolean writeIndex = randomBoolean() ? 
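writeIndex is a nullable Boolean on purpose: null means the index expressed no write preference, which is distinct from an explicit false. That is why createTestItem randomizes across all three states and why the assertEqualInstances override above must re-randomize when the expected value is null. On the wire this maps naturally onto the optional-boolean stream helpers; a round-trip sketch (writeOptionalBoolean/readOptionalBoolean exist on StreamOutput/StreamInput, though whether AliasMetaData uses exactly these is an assumption):

Boolean writeIndex = randomBoolean() ? null : randomBoolean(); // null, true, or false
BytesStreamOutput out = new BytesStreamOutput();
out.writeOptionalBoolean(writeIndex);
try (StreamInput in = out.bytes().streamInput()) {
    assertEquals(writeIndex, in.readOptionalBoolean()); // all three states survive
}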
null : randomBoolean(); + setupRequestAlias(new Alias("alias1").writeIndex(writeIndex)); + setupRequestMapping("mapping1", createMapping()); + setupRequestCustom("custom1", createCustom()); + reqSettings.put("key1", "value1"); + + final ClusterState result = executeTask(); + assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); + assertThat(result.metaData().index("test").getAliases().get("alias1").writeIndex(), equalTo(writeIndex)); + } + + public void testWriteIndexValidationException() throws Exception { + IndexMetaData existingWriteIndex = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).putAlias(AliasMetaData.builder("alias1").writeIndex(true).build()) + .numberOfShards(1).numberOfReplicas(0).build(); + idxBuilder.put("test2", existingWriteIndex); + setupRequestMapping("mapping1", createMapping()); + setupRequestCustom("custom1", createCustom()); + reqSettings.put("key1", "value1"); + setupRequestAlias(new Alias("alias1").writeIndex(true)); + + Exception exception = expectThrows(IllegalStateException.class, () -> executeTask()); + assertThat(exception.getMessage(), startsWith("alias [alias1] has more than one write index [")); + } + private IndexRoutingTable createIndexRoutingTableWithStartedShards(Index index) { final IndexRoutingTable idxRoutingTable = mock(IndexRoutingTable.class); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 7734a9d7b4e6a..9e8a5e04f43c1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -19,18 +19,31 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.Set; @@ -38,6 +51,23 @@ public class IndexMetaDataTests extends ESTestCase { + private IndicesModule INDICES_MODULE = new IndicesModule(Collections.emptyList()); + + @Before + public void setUp() throws Exception { + super.setUp(); + } + + @Override + protected NamedWriteableRegistry writableRegistry() { + return new NamedWriteableRegistry(INDICES_MODULE.getNamedWriteables()); + } + 
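testWriteIndexValidationException, and the MetaDataTests addition further down, both pin the same invariant: among all indices an alias points at, at most one may carry is_write_index=true. A hypothetical sketch of that check over plain java.util collections (the real validation lives in MetaData.Builder and is not shown in this diff):

// aliasesPerIndex: index name -> (alias name -> nullable writeIndex flag)
static void validateOneWriteIndexPerAlias(Map<String, Map<String, Boolean>> aliasesPerIndex) {
    Map<String, List<String>> writeIndices = new HashMap<>();
    aliasesPerIndex.forEach((index, aliases) -> aliases.forEach((alias, writeIndex) -> {
        if (Boolean.TRUE.equals(writeIndex)) {
            writeIndices.computeIfAbsent(alias, k -> new ArrayList<>()).add(index);
        }
    }));
    writeIndices.forEach((alias, indices) -> {
        if (indices.size() > 1) {
            throw new IllegalStateException("alias [" + alias + "] has more than one write index " + indices);
        }
    });
}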
+ @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(INDICES_MODULE.getNamedXContents()); + } + public void testIndexMetaDataSerialization() throws IOException { Integer numShard = randomFrom(1, 2, 4, 8, 16); int numberOfReplicas = randomIntBetween(0, 10); @@ -50,7 +80,12 @@ public void testIndexMetaDataSerialization() throws IOException { .creationDate(randomLong()) .primaryTerm(0, 2) .setRoutingNumShards(32) - .build(); + .putRolloverInfo( + new RolloverInfo(randomAlphaOfLength(5), + Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong())), + randomNonNegativeLong())).build(); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); @@ -71,17 +106,20 @@ public void testIndexMetaDataSerialization() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); metaData.writeTo(out); - IndexMetaData deserialized = IndexMetaData.readFrom(out.bytes().streamInput()); - assertEquals(metaData, deserialized); - assertEquals(metaData.hashCode(), deserialized.hashCode()); - - assertEquals(metaData.getNumberOfReplicas(), deserialized.getNumberOfReplicas()); - assertEquals(metaData.getNumberOfShards(), deserialized.getNumberOfShards()); - assertEquals(metaData.getCreationVersion(), deserialized.getCreationVersion()); - assertEquals(metaData.getRoutingNumShards(), deserialized.getRoutingNumShards()); - assertEquals(metaData.getCreationDate(), deserialized.getCreationDate()); - assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor()); - assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0)); + try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { + IndexMetaData deserialized = IndexMetaData.readFrom(in); + assertEquals(metaData, deserialized); + assertEquals(metaData.hashCode(), deserialized.hashCode()); + + assertEquals(metaData.getNumberOfReplicas(), deserialized.getNumberOfReplicas()); + assertEquals(metaData.getNumberOfShards(), deserialized.getNumberOfShards()); + assertEquals(metaData.getCreationVersion(), deserialized.getCreationVersion()); + assertEquals(metaData.getRoutingNumShards(), deserialized.getRoutingNumShards()); + assertEquals(metaData.getCreationDate(), deserialized.getCreationDate()); + assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor()); + assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0)); + assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos()); + } } public void testGetRoutingFactor() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java index e5b52d8cf52bf..812dfd8f6f686 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexAliasesServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; @@ -29,9 +30,13 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; +import java.util.List; import static 
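The key change in this test is the stream wrapper: RolloverInfo holds polymorphic Condition values (MaxAgeCondition, MaxSizeCondition, MaxDocsCondition), so reading IndexMetaData back requires a NamedWriteableRegistry to resolve them; a bare streamInput() would fail. The round-trip shape, condensed from the test above:

BytesStreamOutput out = new BytesStreamOutput();
metaData.writeTo(out);
NamedWriteableRegistry registry =
    new NamedWriteableRegistry(new IndicesModule(Collections.emptyList()).getNamedWriteables());
try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
    IndexMetaData deserialized = IndexMetaData.readFrom(in);
    assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos());
}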
java.util.Collections.singletonList; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anySetOf; import static org.mockito.Mockito.mock; @@ -64,7 +69,7 @@ public void testAddAndRemove() { ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), index); // Add an alias to it - ClusterState after = service.innerExecute(before, singletonList(new AliasAction.Add(index, "test", null, null, null))); + ClusterState after = service.innerExecute(before, singletonList(new AliasAction.Add(index, "test", null, null, null, null))); AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test"); assertNotNull(alias); assertTrue(alias.isAlias()); @@ -74,7 +79,7 @@ public void testAddAndRemove() { before = after; after = service.innerExecute(before, Arrays.asList( new AliasAction.Remove(index, "test"), - new AliasAction.Add(index, "test_2", null, null, null))); + new AliasAction.Add(index, "test_2", null, null, null, null))); assertNull(after.metaData().getAliasAndIndexLookup().get("test")); alias = after.metaData().getAliasAndIndexLookup().get("test_2"); assertNotNull(alias); @@ -95,7 +100,7 @@ public void testSwapIndexWithAlias() { // Now remove "test" and add an alias to "test" to "test_2" in one go ClusterState after = service.innerExecute(before, Arrays.asList( - new AliasAction.Add("test_2", "test", null, null, null), + new AliasAction.Add("test_2", "test", null, null, null, null), new AliasAction.RemoveIndex("test"))); AliasOrIndex alias = after.metaData().getAliasAndIndexLookup().get("test"); assertNotNull(alias); @@ -109,7 +114,7 @@ public void testAddAliasToRemovedIndex() { // Attempt to add an alias to "test" at the same time as we remove it IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.innerExecute(before, Arrays.asList( - new AliasAction.Add("test", "alias", null, null, null), + new AliasAction.Add("test", "alias", null, null, null, null), new AliasAction.RemoveIndex("test")))); assertEquals("test", e.getIndex().getName()); } @@ -125,6 +130,127 @@ public void testRemoveIndexTwice() { assertNull(after.metaData().getAliasAndIndexLookup().get("test")); } + public void testAddWriteOnlyWithNoExistingAliases() { + ClusterState before = createIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test"); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, false))); + assertFalse(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test"))); + + after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, null))); + assertNull(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test"))); + + after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true))); + assertTrue(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + 
equalTo(after.metaData().index("test"))); + } + + public void testAddWriteOnlyWithExistingWriteIndex() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").writeIndex(true).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, null))); + assertNull(after.metaData().index("test").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + + Exception exception = expectThrows(IllegalStateException.class, () -> service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true)))); + assertThat(exception.getMessage(), startsWith("alias [alias] has more than one write index [")); + } + + public void testSwapWriteOnlyIndex() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").writeIndex(true).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + Boolean unsetValue = randomBoolean() ? null : false; + List swapActions = Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, unsetValue), + new AliasAction.Add("test2", "alias", null, null, null, true) + ); + Collections.shuffle(swapActions, random()); + ClusterState after = service.innerExecute(before, swapActions); + assertThat(after.metaData().index("test").getAliases().get("alias").writeIndex(), equalTo(unsetValue)); + assertTrue(after.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + } + + public void testAddWriteOnlyWithExistingNonWriteIndices() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").writeIndex(randomBoolean() ? null : false).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").writeIndex(randomBoolean() ? 
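Note what testSwapWriteOnlyIndex is really asserting: both Add actions go through a single innerExecute, and Collections.shuffle shows their order is irrelevant, so the one-write-index invariant must be enforced against the final cluster state rather than after each action. The swap itself, with the six-argument AliasAction.Add introduced in this change:

// Demote "test" and promote "test2" in one atomic cluster-state update.
ClusterState after = service.innerExecute(before, Arrays.asList(
    new AliasAction.Add("test", "alias", null, null, null, false),
    new AliasAction.Add("test2", "alias", null, null, null, true)));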
null : false).build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData3 = IndexMetaData.builder("test3") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2).put(indexMetaData3)).build(); + + assertNull(((AliasOrIndex.Alias) before.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex()); + + ClusterState after = service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test3", "alias", null, null, null, true))); + assertTrue(after.metaData().index("test3").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test3"))); + + } + + public void testAddWriteOnlyWithIndexRemoved() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .putAlias(AliasMetaData.builder("alias").build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .putAlias(AliasMetaData.builder("alias").build()) + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + assertNull(before.metaData().index("test").getAliases().get("alias").writeIndex()); + assertNull(before.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertNull(((AliasOrIndex.Alias) before.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex()); + + ClusterState after = service.innerExecute(before, Collections.singletonList(new AliasAction.RemoveIndex("test"))); + assertNull(after.metaData().index("test2").getAliases().get("alias").writeIndex()); + assertThat(((AliasOrIndex.Alias) after.metaData().getAliasAndIndexLookup().get("alias")).getWriteIndex(), + equalTo(after.metaData().index("test2"))); + } + + public void testAddWriteOnlyValidatesAgainstMetaDataBuilder() { + IndexMetaData.Builder indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + IndexMetaData.Builder indexMetaData2 = IndexMetaData.builder("test2") + .settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1); + ClusterState before = ClusterState.builder(ClusterName.DEFAULT) + .metaData(MetaData.builder().put(indexMetaData).put(indexMetaData2)).build(); + + Exception exception = expectThrows(IllegalStateException.class, () -> service.innerExecute(before, Arrays.asList( + new AliasAction.Add("test", "alias", null, null, null, true), + new AliasAction.Add("test2", "alias", null, null, null, true) + ))); + assertThat(exception.getMessage(), startsWith("alias [alias] has more than one write index [")); + } + private ClusterState createIndex(ClusterState state, String index) { IndexMetaData indexMetaData = IndexMetaData.builder(index) .settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random()))) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 3a83580dc1cdd..96a533118c8da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -99,6 +99,34 @@ public void testAliasCollidingWithAnExistingIndex() { } } + public void testValidateAliasWriteOnly() { + String alias = randomAlphaOfLength(5); + String indexA = randomAlphaOfLength(6); + String indexB = randomAlphaOfLength(7); + Boolean aWriteIndex = randomBoolean() ? null : randomBoolean(); + Boolean bWriteIndex; + if (Boolean.TRUE.equals(aWriteIndex)) { + bWriteIndex = randomFrom(Boolean.FALSE, null); + } else { + bWriteIndex = randomFrom(Boolean.TRUE, Boolean.FALSE, null); + } + // when only one index/alias pair exist + MetaData metaData = MetaData.builder().put(buildIndexMetaData(indexA, alias, aWriteIndex)).build(); + + // when alias points to two indices, but valid + // one of the following combinations: [(null, null), (null, true), (null, false), (false, false)] + MetaData.builder(metaData).put(buildIndexMetaData(indexB, alias, bWriteIndex)).build(); + + // when too many write indices + Exception exception = expectThrows(IllegalStateException.class, + () -> { + IndexMetaData.Builder metaA = buildIndexMetaData(indexA, alias, true); + IndexMetaData.Builder metaB = buildIndexMetaData(indexB, alias, true); + MetaData.builder().put(metaA).put(metaB).build(); + }); + assertThat(exception.getMessage(), startsWith("alias [" + alias + "] has more than one write index [")); + } + public void testResolveIndexRouting() { IndexMetaData.Builder builder = IndexMetaData.builder("index") .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) @@ -428,6 +456,13 @@ public void testFindMappingsWithFilters() throws IOException { } } + private IndexMetaData.Builder buildIndexMetaData(String name, String alias, Boolean writeIndex) { + return IndexMetaData.builder(name) + .settings(settings(Version.CURRENT)).creationDate(randomNonNegativeLong()) + .putAlias(AliasMetaData.builder(alias).writeIndex(writeIndex)) + .numberOfShards(1).numberOfReplicas(0); + } + @SuppressWarnings("unchecked") private static void assertIndexMappingsNoFields(ImmutableOpenMap> mappings, String index) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java index 3b80d1f6e2cf0..bde478eb36381 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java @@ -111,7 +111,7 @@ public void testSimpleJsonFromAndTo() throws IOException { .putMapping("mapping1", MAPPING_SOURCE1) .putMapping("mapping2", MAPPING_SOURCE2) .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1)) - .putAlias(newAliasMetaDataBuilder("alias2")) + .putAlias(newAliasMetaDataBuilder("alias2").writeIndex(randomBoolean() ? null : randomBoolean())) .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2))) .put(IndexTemplateMetaData.builder("foo") .patterns(Collections.singletonList("bar")) @@ -132,7 +132,7 @@ public void testSimpleJsonFromAndTo() throws IOException { .putMapping("mapping1", MAPPING_SOURCE1) .putMapping("mapping2", MAPPING_SOURCE2) .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1)) - .putAlias(newAliasMetaDataBuilder("alias2")) + .putAlias(newAliasMetaDataBuilder("alias2").writeIndex(randomBoolean() ? 
null : randomBoolean())) .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2))) .put(IndexTemplateMetaData.builder("foo") .patterns(Collections.singletonList("bar")) @@ -146,7 +146,6 @@ public void testSimpleJsonFromAndTo() throws IOException { .build(); String metaDataSource = MetaData.Builder.toXContent(metaData); -// System.out.println("ToJson: " + metaDataSource); MetaData parsedMetaData = MetaData.Builder.fromXContent(createParser(JsonXContent.jsonXContent, metaDataSource)); @@ -270,6 +269,8 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1)); assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2")); assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue()); + assertThat(indexMetaData.getAliases().get("alias2").writeIndex(), + equalTo(metaData.index("test11").getAliases().get("alias2").writeIndex())); assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4")); assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2)); @@ -288,6 +289,8 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1)); assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2")); assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue()); + assertThat(indexMetaData.getAliases().get("alias2").writeIndex(), + equalTo(metaData.index("test12").getAliases().get("alias2").writeIndex())); assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4")); assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2)); diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index ba74e373f8842..8a4eb8e9177f1 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -159,8 +159,8 @@ public void testRegisterHttpTransport() { @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher) { @@ -198,8 +198,8 @@ public Map> getTransports(Settings settings, ThreadP @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher) { @@ -233,8 +233,8 @@ public Map> getTransports(Settings settings, ThreadP @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher) { diff --git 
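For plugin authors, the NetworkModule change above is a breaking one: getHttpTransports now receives a PageCacheRecycler and no longer a NamedWriteableRegistry. A sketch of an updated override (MyHttpTransport is a hypothetical implementation; only the signature is taken from this diff):

@Override
public Map<String, Supplier<HttpServerTransport>> getHttpTransports(
        Settings settings, ThreadPool threadPool, BigArrays bigArrays,
        PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService,
        NamedXContentRegistry xContentRegistry, NetworkService networkService,
        HttpServerTransport.Dispatcher requestDispatcher) {
    return Collections.singletonMap("my-transport", () ->
        new MyHttpTransport(settings, networkService, bigArrays, pageCacheRecycler,
            threadPool, xContentRegistry, requestDispatcher));
}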
a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index ee74d98002faa..a7629e5f48b6c 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -19,13 +19,27 @@ package org.elasticsearch.http; +import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import java.io.IOException; +import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static java.net.InetAddress.getByName; @@ -36,6 +50,27 @@ public class AbstractHttpServerTransportTests extends ESTestCase { + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + } + public void testHttpPublishPort() throws Exception { int boundPort = randomIntBetween(9000, 9100); int otherBoundPort = randomIntBetween(9200, 9300); @@ -71,6 +106,64 @@ public void testHttpPublishPort() throws Exception { } } + public void testDispatchDoesNotModifyThreadContext() { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("bar", "baz"); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + threadContext.putHeader("foo_bad", "bar"); + threadContext.putTransient("bar_bad", "baz"); + } + + }; + + try (AbstractHttpServerTransport transport = + new AbstractHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher) { + @Override + protected TransportAddress bindAddress(InetAddress hostAddress) { + return null; + } + + @Override + protected void doStart() { + + } + + @Override + protected void doStop() { + + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public HttpStats stats() { + return null; + } + }) { + + transport.dispatchRequest(null, null, null); + 
assertNull(threadPool.getThreadContext().getHeader("foo")); + assertNull(threadPool.getThreadContext().getTransient("bar")); + + transport.dispatchRequest(null, null, new Exception()); + assertNull(threadPool.getThreadContext().getHeader("foo_bad")); + assertNull(threadPool.getThreadContext().getTransient("bar_bad")); + } + } + private TransportAddress address(String host, int port) throws UnknownHostException { return new TransportAddress(getByName(host), port); } diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java new file mode 100644 index 0000000000000..bc499ed8a420a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -0,0 +1,444 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class DefaultRestChannelTests extends ESTestCase { + + private 
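testDispatchDoesNotModifyThreadContext encodes a containment guarantee: whatever headers or transients a dispatcher writes must not survive into the transport's thread context. The usual way to get that guarantee is to stash the context around dispatch; a sketch with the real ThreadContext API (whether AbstractHttpServerTransport does exactly this is inferred from the test, not shown in the diff):

try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
    // Everything the dispatcher puts into the context is scoped to this block.
    dispatcher.dispatchRequest(restRequest, restChannel, threadPool.getThreadContext());
}
// Here the "foo" header and "bar" transient set by the dispatcher are gone again.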
ThreadPool threadPool; + private MockBigArrays bigArrays; + private HttpChannel httpChannel; + + @Before + public void setup() { + httpChannel = mock(HttpChannel.class); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + @After + public void shutdown() { + if (threadPool != null) { + threadPool.shutdownNow(); + } + } + + public void testResponse() { + final TestResponse response = executeRequest(Settings.EMPTY, "request-host"); + assertThat(response.content(), equalTo(new TestRestResponse().content())); + } + + // TODO: Enable these Cors tests when the Cors logic lives in :server + +// public void testCorsEnabledWithoutAllowOrigins() { +// // Set up a HTTP transport with only the CORS enabled setting +// Settings settings = Settings.builder() +// .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) +// .build(); +// HttpResponse response = executeRequest(settings, "remote-host", "request-host"); +// // inspect response and validate +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); +// } +// +// public void testCorsEnabledWithAllowOrigins() { +// final String originValue = "remote-host"; +// // create a http transport with CORS enabled and allow origin configured +// Settings settings = Settings.builder() +// .put(SETTING_CORS_ENABLED.getKey(), true) +// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) +// .build(); +// HttpResponse response = executeRequest(settings, originValue, "request-host"); +// // inspect response and validate +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// } +// +// public void testCorsAllowOriginWithSameHost() { +// String originValue = "remote-host"; +// String host = "remote-host"; +// // create a http transport with CORS enabled +// Settings settings = Settings.builder() +// .put(SETTING_CORS_ENABLED.getKey(), true) +// .build(); +// HttpResponse response = executeRequest(settings, originValue, host); +// // inspect response and validate +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// +// originValue = "http://" + originValue; +// response = executeRequest(settings, originValue, host); +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// +// originValue = originValue + ":5555"; +// host = host + ":5555"; +// response = executeRequest(settings, originValue, host); +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// +// originValue = originValue.replace("http", "https"); +// response = executeRequest(settings, originValue, host); +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// 
assertThat(allowedOrigins, is(originValue)); +// } +// +// public void testThatStringLiteralWorksOnMatch() { +// final String originValue = "remote-host"; +// Settings settings = Settings.builder() +// .put(SETTING_CORS_ENABLED.getKey(), true) +// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) +// .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") +// .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) +// .build(); +// HttpResponse response = executeRequest(settings, originValue, "request-host"); +// // inspect response and validate +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); +// } +// +// public void testThatAnyOriginWorks() { +// final String originValue = NioCorsHandler.ANY_ORIGIN; +// Settings settings = Settings.builder() +// .put(SETTING_CORS_ENABLED.getKey(), true) +// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) +// .build(); +// HttpResponse response = executeRequest(settings, originValue, "request-host"); +// // inspect response and validate +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); +// String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); +// assertThat(allowedOrigins, is(originValue)); +// assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); +// } + + public void testHeadersSet() { + Settings settings = Settings.builder().build(); + final TestRequest httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); + httpRequest.getHeaders().put(DefaultRestChannel.X_OPAQUE_ID, Collections.singletonList("abc")); + final RestRequest request = RestRequest.request(xContentRegistry(), httpRequest, httpChannel); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + + // send a response + DefaultRestChannel channel = new DefaultRestChannel(httpChannel, httpRequest, request, bigArrays, handlingSettings, + threadPool.getThreadContext()); + TestRestResponse resp = new TestRestResponse(); + final String customHeader = "custom-header"; + final String customHeaderValue = "xyz"; + resp.addHeader(customHeader, customHeaderValue); + channel.sendResponse(resp); + + // inspect what was written + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(TestResponse.class); + verify(httpChannel).sendResponse(responseCaptor.capture(), any()); + TestResponse httpResponse = responseCaptor.getValue(); + Map> headers = httpResponse.headers; + assertNull(headers.get("non-existent-header")); + assertEquals(customHeaderValue, headers.get(customHeader).get(0)); + assertEquals("abc", headers.get(DefaultRestChannel.X_OPAQUE_ID).get(0)); + assertEquals(Integer.toString(resp.content().length()), headers.get(DefaultRestChannel.CONTENT_LENGTH).get(0)); + assertEquals(resp.contentType(), headers.get(DefaultRestChannel.CONTENT_TYPE).get(0)); + } + + public void testCookiesSet() { + Settings settings = Settings.builder().put(HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.getKey(), true).build(); + final TestRequest httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); + httpRequest.getHeaders().put(DefaultRestChannel.X_OPAQUE_ID, 
Collections.singletonList("abc")); + final RestRequest request = RestRequest.request(xContentRegistry(), httpRequest, httpChannel); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + + // send a response + DefaultRestChannel channel = new DefaultRestChannel(httpChannel, httpRequest, request, bigArrays, handlingSettings, + threadPool.getThreadContext()); + channel.sendResponse(new TestRestResponse()); + + // inspect what was written + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(TestResponse.class); + verify(httpChannel).sendResponse(responseCaptor.capture(), any()); + TestResponse nioResponse = responseCaptor.getValue(); + Map> headers = nioResponse.headers; + assertThat(headers.get(DefaultRestChannel.SET_COOKIE), hasItem("cookie")); + assertThat(headers.get(DefaultRestChannel.SET_COOKIE), hasItem("cookie2")); + } + + @SuppressWarnings("unchecked") + public void testReleaseInListener() throws IOException { + final Settings settings = Settings.builder().build(); + final TestRequest httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); + final RestRequest request = RestRequest.request(xContentRegistry(), httpRequest, httpChannel); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + + DefaultRestChannel channel = new DefaultRestChannel(httpChannel, httpRequest, request, bigArrays, handlingSettings, + threadPool.getThreadContext()); + final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, + JsonXContent.contentBuilder().startObject().endObject()); + assertThat(response.content(), not(instanceOf(Releasable.class))); + + // ensure we have reserved bytes + if (randomBoolean()) { + BytesStreamOutput out = channel.bytesOutput(); + assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); + } else { + try (XContentBuilder builder = channel.newBuilder()) { + // do something builder + builder.startObject().endObject(); + } + } + + channel.sendResponse(response); + Class> listenerClass = (Class>) (Class) ActionListener.class; + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(httpChannel).sendResponse(any(), listenerCaptor.capture()); + ActionListener listener = listenerCaptor.getValue(); + if (randomBoolean()) { + listener.onResponse(null); + } else { + listener.onFailure(new ClosedChannelException()); + } + // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released + } + + @SuppressWarnings("unchecked") + public void testConnectionClose() throws Exception { + final Settings settings = Settings.builder().build(); + final HttpRequest httpRequest; + final boolean close = randomBoolean(); + if (randomBoolean()) { + httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); + if (close) { + httpRequest.getHeaders().put(DefaultRestChannel.CONNECTION, Collections.singletonList(DefaultRestChannel.CLOSE)); + } + } else { + httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_0, RestRequest.Method.GET, "/"); + if (!close) { + httpRequest.getHeaders().put(DefaultRestChannel.CONNECTION, Collections.singletonList(DefaultRestChannel.KEEP_ALIVE)); + } + } + final RestRequest request = RestRequest.request(xContentRegistry(), httpRequest, httpChannel); + + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + + DefaultRestChannel channel = new DefaultRestChannel(httpChannel, httpRequest, 
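testHeadersSet above fixes the response-header contract for DefaultRestChannel: custom headers from the RestResponse are copied through, the request's X-Opaque-Id is echoed back, and Content-Length/Content-Type are derived from the response content. A hypothetical sketch of that copy step (the constants are assumed to hold the literal header names asserted in the test):

// Echo X-Opaque-Id from the request, if present.
List<String> opaqueId = request.getHeaders().get("X-Opaque-Id");
if (opaqueId != null) {
    opaqueId.forEach(value -> httpResponse.addHeader("X-Opaque-Id", value));
}
// Copy custom headers, then the content metadata.
restResponse.getHeaders().forEach((name, values) -> values.forEach(v -> httpResponse.addHeader(name, v)));
httpResponse.addHeader("content-length", Integer.toString(restResponse.content().length()));
httpResponse.addHeader("content-type", restResponse.contentType());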
request, bigArrays, handlingSettings, + threadPool.getThreadContext()); + channel.sendResponse(new TestRestResponse()); + Class> listenerClass = (Class>) (Class) ActionListener.class; + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(httpChannel).sendResponse(any(), listenerCaptor.capture()); + ActionListener listener = listenerCaptor.getValue(); + if (randomBoolean()) { + listener.onResponse(null); + } else { + listener.onFailure(new ClosedChannelException()); + } + if (close) { + verify(httpChannel, times(1)).close(); + } else { + verify(httpChannel, times(0)).close(); + } + } + + private TestResponse executeRequest(final Settings settings, final String host) { + return executeRequest(settings, null, host); + } + + private TestResponse executeRequest(final Settings settings, final String originValue, final String host) { + HttpRequest httpRequest = new TestRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); + // TODO: These exist for the Cors tests +// if (originValue != null) { +// httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); +// } +// httpRequest.headers().add(HttpHeaderNames.HOST, host); + final RestRequest request = RestRequest.request(xContentRegistry(), httpRequest, httpChannel); + + HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); + RestChannel channel = new DefaultRestChannel(httpChannel, httpRequest, request, bigArrays, httpHandlingSettings, + threadPool.getThreadContext()); + channel.sendResponse(new TestRestResponse()); + + // get the response + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(TestResponse.class); + verify(httpChannel, atLeastOnce()).sendResponse(responseCaptor.capture(), any()); + return responseCaptor.getValue(); + } + + private static class TestRequest implements HttpRequest { + + private final HttpVersion version; + private final RestRequest.Method method; + private final String uri; + private HashMap> headers = new HashMap<>(); + + private TestRequest(HttpVersion version, RestRequest.Method method, String uri) { + + this.version = version; + this.method = method; + this.uri = uri; + } + + @Override + public RestRequest.Method method() { + return method; + } + + @Override + public String uri() { + return uri; + } + + @Override + public BytesReference content() { + return BytesArray.EMPTY; + } + + @Override + public Map> getHeaders() { + return headers; + } + + @Override + public List strictCookies() { + return Arrays.asList("cookie", "cookie2"); + } + + @Override + public HttpVersion protocolVersion() { + return version; + } + + @Override + public HttpRequest removeHeader(String header) { + throw new UnsupportedOperationException("Do not support removing header on test request."); + } + + @Override + public HttpResponse createResponse(RestStatus status, BytesReference content) { + return new TestResponse(status, content); + } + } + + private static class TestResponse implements HttpResponse { + + private final RestStatus status; + private final BytesReference content; + private final Map> headers = new HashMap<>(); + + TestResponse(RestStatus status, BytesReference content) { + this.status = status; + this.content = content; + } + + public String contentType() { + return "text"; + } + + public BytesReference content() { + return content; + } + + public RestStatus status() { + return status; + } + + @Override + public void addHeader(String name, String value) { + if (headers.containsKey(name) == false) { + ArrayList values = new 
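testConnectionClose walks the standard HTTP persistence matrix: an HTTP/1.1 channel stays open unless the client sends Connection: close, while an HTTP/1.0 channel closes unless the client sends Connection: keep-alive. A standalone sketch of that decision (helper name hypothetical; the test drives the same four cases through the channel mock):

static boolean shouldCloseConnection(HttpRequest request) {
    List<String> connection = request.getHeaders().get("Connection");
    if (request.protocolVersion() == HttpRequest.HttpVersion.HTTP_1_0) {
        // HTTP/1.0 default is close; an explicit keep-alive keeps the channel open.
        return connection == null || connection.contains("keep-alive") == false;
    }
    // HTTP/1.1 default is keep-alive; an explicit close closes the channel.
    return connection != null && connection.contains("close");
}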
ArrayList<>(); + values.add(value); + headers.put(name, values); + } else { + headers.get(name).add(value); + } + } + + @Override + public boolean containsHeader(String name) { + return headers.containsKey(name); + } + } + + private static class TestRestResponse extends RestResponse { + + private final BytesReference content; + + TestRestResponse() { + content = new BytesArray("content".getBytes(StandardCharsets.UTF_8)); + } + + public String contentType() { + return "text"; + } + + public BytesReference content() { + return content; + } + + public RestStatus status() { + return RestStatus.OK; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 9352d978e6e46..2cea9bb364684 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.index.Term; -import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; @@ -145,18 +144,8 @@ public void testRefCount() throws IOException { store.decRef(); assertThat(store.refCount(), Matchers.equalTo(0)); assertFalse(store.tryIncRef()); - try { - store.incRef(); - fail(" expected exception"); - } catch (AlreadyClosedException ex) { - - } - try { - store.ensureOpen(); - fail(" expected exception"); - } catch (AlreadyClosedException ex) { - - } + expectThrows(IllegalStateException.class, store::incRef); + expectThrows(IllegalStateException.class, store::ensureOpen); } public void testVerifyingIndexOutput() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index 916fdee213695..f13a35613d530 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.TestThreadPool; @@ -649,7 +648,7 @@ public Assignment getAssignment(P params, ClusterState clusterState) { } @Override - protected void nodeOperation(AllocatedPersistentTask task, P params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, P params, PersistentTaskState state) { throw new UnsupportedOperationException(); } })); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index 72e74359d3016..5b1f74d6cdfa5 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -42,10 +42,9 @@ import 
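// This file tracks the Status -> State rename; a minimal sketch of the renamed builder API
// (randomAssignment() is this test's own helper, TestParams the test plugin's params class):
//     PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
//     tasks.addTask("task-1", TestPersistentTasksExecutor.NAME, new TestParams("param"), randomAssignment());
//     tasks.updateTaskState("task-1", new State("phase 1"));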
org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Builder; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; -import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; import java.io.IOException; @@ -79,7 +78,7 @@ protected PersistentTasksCustomMetaData createTestInstance() { randomAssignment()); if (randomBoolean()) { // From time to time update status - tasks.updateTaskStatus(taskId, new Status(randomAlphaOfLength(10))); + tasks.updateTaskState(taskId, new State(randomAlphaOfLength(10))); } } return tasks.build(); @@ -96,7 +95,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { new Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::new), new Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::readDiffFrom), new Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new), - new Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) )); } @@ -118,7 +117,7 @@ protected Custom makeTestChanges(Custom testInstance) { if (builder.getCurrentTaskIds().isEmpty()) { addRandomTask(builder); } else { - builder.updateTaskStatus(pickRandomTask(builder), randomBoolean() ? new Status(randomAlphaOfLength(10)) : null); + builder.updateTaskState(pickRandomTask(builder), randomBoolean() ? new State(randomAlphaOfLength(10)) : null); } break; case 3: @@ -155,9 +154,10 @@ private String pickRandomTask(PersistentTasksCustomMetaData.Builder testInstance @Override protected NamedXContentRegistry xContentRegistry() { return new NamedXContentRegistry(Arrays.asList( - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME), - TestParams::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, + new ParseField(TestPersistentTasksExecutor.NAME), TestParams::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, + new ParseField(TestPersistentTasksExecutor.NAME), State::fromXContent) )); } @@ -186,7 +186,7 @@ public void testSerializationContext() throws Exception { // Things that should be serialized assertEquals(testTask.getTaskName(), newTask.getTaskName()); assertEquals(testTask.getId(), newTask.getId()); - assertEquals(testTask.getStatus(), newTask.getStatus()); + assertEquals(testTask.getState(), newTask.getState()); assertEquals(testTask.getParams(), newTask.getParams()); // Things that shouldn't be serialized @@ -224,10 +224,10 @@ public void testBuilder() { case 2: if (builder.hasTask(lastKnownTask)) { changed = true; - builder.updateTaskStatus(lastKnownTask, randomBoolean() ? new Status(randomAlphaOfLength(10)) : null); + builder.updateTaskState(lastKnownTask, randomBoolean() ? 
new State(randomAlphaOfLength(10)) : null); } else { String fLastKnownTask = lastKnownTask; - expectThrows(ResourceNotFoundException.class, () -> builder.updateTaskStatus(fLastKnownTask, null)); + expectThrows(ResourceNotFoundException.class, () -> builder.updateTaskState(fLastKnownTask, null)); } break; case 3: diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java index 356e518198c52..655a21a5f5390 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -64,7 +63,7 @@ public void setUp() throws Exception { public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { @Override - protected void nodeOperation(AllocatedPersistentTask task, Params params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, Params params, PersistentTaskState state) { logger.debug("Executing task {}", task); } }; diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8f37a2412ef5a..e746ff71627cd 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -31,7 +31,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; -import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestTasksRequestBuilder; @@ -190,11 +190,11 @@ public void testPersistentActionStatusUpdate() throws Exception { PersistentTasksCustomMetaData tasksInProgress = internalCluster().clusterService().state().getMetaData() .custom(PersistentTasksCustomMetaData.TYPE); assertThat(tasksInProgress.tasks().size(), equalTo(1)); - assertThat(tasksInProgress.tasks().iterator().next().getStatus(), nullValue()); + assertThat(tasksInProgress.tasks().iterator().next().getState(), nullValue()); int numberOfUpdates = randomIntBetween(1, 10); for (int i = 0; i < numberOfUpdates; i++) { - logger.info("Updating the task status"); + logger.info("Updating the task states"); // Complete the running task and make sure it finishes properly assertThat(new TestTasksRequestBuilder(client()).setOperation("update_status").setTaskId(firstRunningTask.getTaskId()) .get().getTasks().size(), equalTo(1)); @@ -202,8 +202,8 @@ public void testPersistentActionStatusUpdate() throws Exception { int finalI = i; 
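// The wait condition below matches the JSON that State#toXContent renders; a sanity sketch
// of that shape, assuming org.elasticsearch.common.Strings#toString for the rendering:
assertEquals("{\"phase\":\"phase " + (finalI + 1) + "\"}",
    org.elasticsearch.common.Strings.toString(new State("phase " + (finalI + 1))));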
WaitForPersistentTaskFuture future1 = new WaitForPersistentTaskFuture<>(); persistentTasksService.waitForPersistentTaskCondition(taskId, - task -> task != null && task.getStatus() != null && task.getStatus().toString() != null && - task.getStatus().toString().equals("{\"phase\":\"phase " + (finalI + 1) + "\"}"), + task -> task != null && task.getState() != null && task.getState().toString() != null && + task.getState().toString().equals("{\"phase\":\"phase " + (finalI + 1) + "\"}"), TimeValue.timeValueSeconds(10), future1); assertThat(future1.get().getId(), equalTo(taskId)); } @@ -215,7 +215,7 @@ public void testPersistentActionStatusUpdate() throws Exception { assertThrows(future1, IllegalStateException.class, "timed out after 10ms"); PlainActionFuture> failedUpdateFuture = new PlainActionFuture<>(); - persistentTasksService.updateStatus(taskId, -2, new Status("should fail"), failedUpdateFuture); + persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), failedUpdateFuture); assertThrows(failedUpdateFuture, ResourceNotFoundException.class, "the task with id " + taskId + " and allocation id -2 doesn't exist"); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 5000f73445b0c..906ecf232053d 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -210,13 +210,12 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { ClusterState state = createInitialClusterState(1, Settings.EMPTY); - Task.Status status = new TestPersistentTasksPlugin.Status("_test_phase"); + PersistentTaskState taskState = new TestPersistentTasksPlugin.State("_test_phase"); PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(); String taskId = UUIDs.base64UUID(); TestParams taskParams = new TestParams("other_0"); - tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, taskParams, - new Assignment("this_node", "test assignment on other node")); - tasks.updateTaskStatus(taskId, status); + tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, taskParams, new Assignment("this_node", "test assignment on other node")); + tasks.updateTaskState(taskId, taskState); MetaData.Builder metaData = MetaData.builder(state.metaData()); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build()); ClusterState newClusterState = ClusterState.builder(state).metaData(metaData).build(); @@ -225,7 +224,7 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { assertThat(executor.size(), equalTo(1)); assertThat(executor.get(0).params, sameInstance(taskParams)); - assertThat(executor.get(0).status, sameInstance(status)); + assertThat(executor.get(0).state, sameInstance(taskState)); assertThat(executor.get(0).task, sameInstance(nodeTask)); } @@ -331,15 +330,16 @@ private ClusterState removeTask(ClusterState state, String taskId) { } private class Execution { + private final PersistentTaskParams params; private final AllocatedPersistentTask task; - private final Task.Status status; + private final PersistentTaskState state; private final PersistentTasksExecutor holder; - Execution(PersistentTaskParams params, AllocatedPersistentTask task, Task.Status status, PersistentTasksExecutor holder) { + Execution(PersistentTaskParams params, AllocatedPersistentTask task, 
PersistentTaskState state, PersistentTasksExecutor holder) { this.params = params; this.task = task; - this.status = status; + this.state = state; this.holder = holder; } } @@ -352,11 +352,11 @@ private class MockExecutor extends NodePersistentTasksExecutor { } @Override - public void executeTask(Params params, - Task.Status status, - AllocatedPersistentTask task, - PersistentTasksExecutor executor) { - executions.add(new Execution(params, task, status, executor)); + public void executeTask(final Params params, + final PersistentTaskState state, + final AllocatedPersistentTask task, + final PersistentTasksExecutor executor) { + executions.add(new Execution(params, task, state, executor)); } public Execution get(int i) { diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 97b3407938768..063a861b5c315 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -55,7 +55,6 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -100,16 +99,17 @@ public List> getPersistentTasksExecutor(ClusterServic public List getNamedWriteables() { return Arrays.asList( new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new), - new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new NamedWriteableRegistry.Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) ); } @Override public List getNamedXContent() { return Arrays.asList( - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME), - TestParams::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, + new ParseField(TestPersistentTasksExecutor.NAME), TestParams::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, + new ParseField(TestPersistentTasksExecutor.NAME), State::fromXContent) ); } @@ -221,22 +221,22 @@ public Optional getRequiredFeature() { } } - public static class Status implements Task.Status { + public static class State implements PersistentTaskState { private final String phase; - public static final ConstructingObjectParser STATUS_PARSER = - new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new Status((String) args[0])); + public static final ConstructingObjectParser STATE_PARSER = + new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new State((String) args[0])); static { - STATUS_PARSER.declareString(constructorArg(), new ParseField("phase")); + STATE_PARSER.declareString(constructorArg(), new ParseField("phase")); } - public Status(String phase) { + public State(String phase) { this.phase = requireNonNull(phase, "Phase cannot be null"); } - public Status(StreamInput in) throws IOException { + public State(StreamInput in) throws IOException { phase = in.readString(); } @@ -253,11 +253,10 @@ public XContentBuilder 
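// Round-trip sketch for the STATE_PARSER declared above — a {"phase": ...} document parses
// back into an equal State; createParser is the ESTestCase helper (usage assumed here):
//     try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"phase\":\"x\"}")) {
//         assertEquals(new State("x"), State.fromXContent(parser));
//     }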
toXContent(XContentBuilder builder, Params params) throws return builder; } - public static Task.Status fromXContent(XContentParser parser) throws IOException { - return STATUS_PARSER.parse(parser, null); + public static PersistentTaskState fromXContent(XContentParser parser) throws IOException { + return STATE_PARSER.parse(parser, null); } - @Override public boolean isFragment() { return false; @@ -276,10 +275,10 @@ public String toString() { // Implements equals and hashcode for testing @Override public boolean equals(Object obj) { - if (obj == null || obj.getClass() != Status.class) { + if (obj == null || obj.getClass() != State.class) { return false; } - Status other = (Status) obj; + State other = (State) obj; return phase.equals(other.phase); } @@ -289,7 +288,6 @@ public int hashCode() { } } - public static class TestPersistentTasksExecutor extends PersistentTasksExecutor { public static final String NAME = "cluster:admin/persistent/test"; @@ -317,7 +315,7 @@ public Assignment getAssignment(TestParams params, ClusterState clusterState) { } @Override - protected void nodeOperation(AllocatedPersistentTask task, TestParams params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, TestParams params, PersistentTaskState state) { logger.info("started node operation for the task {}", task); try { TestTask testTask = (TestTask) task; @@ -340,9 +338,9 @@ protected void nodeOperation(AllocatedPersistentTask task, TestParams params, Ta } else if ("update_status".equals(testTask.getOperation())) { testTask.setOperation(null); CountDownLatch latch = new CountDownLatch(1); - Status newStatus = new Status("phase " + phase.incrementAndGet()); - logger.info("updating the task status to {}", newStatus); - task.updatePersistentStatus(newStatus, new ActionListener>() { + State newState = new State("phase " + phase.incrementAndGet()); + logger.info("updating the task state to {}", newState); + task.updatePersistentTaskState(newState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { logger.info("updating was successful"); @@ -540,5 +538,4 @@ protected void taskOperation(TestTasksRequest request, TestTask task, ActionList } - } diff --git a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java index 6e20bb0009732..5ae54640f8e31 100644 --- a/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/UpdatePersistentTaskRequestTests.java @@ -20,9 +20,8 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.AbstractStreamableTestCase; -import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status; +import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction.Request; @@ -32,7 +31,7 @@ public class UpdatePersistentTaskRequestTests extends AbstractStreamableTestCase @Override protected Request createTestInstance() { - return new Request(UUIDs.base64UUID(), randomLong(), new Status(randomAlphaOfLength(10))); + return new Request(UUIDs.base64UUID(), randomLong(), new State(randomAlphaOfLength(10))); } @Override @@ -43,7 +42,7 @@ protected 
Request createBlankInstance() { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry(Collections.singletonList( - new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new) + new NamedWriteableRegistry.Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) )); } } diff --git a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index a0e6f7020302d..a80c3b1bd4238 100644 --- a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -165,28 +164,7 @@ public void testConvert() throws IOException { public void testResponseWhenPathContainsEncodingError() throws IOException { final String path = "%a"; - final RestRequest request = - new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { - @Override - public Method method() { - return null; - } - - @Override - public String uri() { - return null; - } - - @Override - public boolean hasContent() { - return false; - } - - @Override - public BytesReference content() { - return null; - } - }; + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestUtils.decodeComponent(request.rawPath())); final RestChannel channel = new DetailedExceptionRestChannel(request); // if we try to decode the path, this will throw an IllegalArgumentException again diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index f36638a43909f..a090cc40b6857 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -110,21 +110,21 @@ public void testApplyRelevantHeaders() throws Exception { RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(restHeaders).build(); final RestController spyRestController = spy(restController); when(spyRestController.getAllHandlers(fakeRequest)) - .thenReturn(new Iterator() { - @Override - public boolean hasNext() { - return false; - } - - @Override - public MethodHandlers next() { - return new MethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { - assertEquals("true", threadContext.getHeader("header.1")); - assertEquals("true", threadContext.getHeader("header.2")); - assertNull(threadContext.getHeader("header.3")); - }, RestRequest.Method.GET); - } - }); + .thenReturn(new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public MethodHandlers next() { + return new MethodHandlers("/", (RestRequest request, RestChannel channel, NodeClient client) -> { + assertEquals("true", threadContext.getHeader("header.1")); + assertEquals("true", 
threadContext.getHeader("header.2")); + assertNull(threadContext.getHeader("header.3")); + }, RestRequest.Method.GET); + } + }); AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); // the rest controller relies on the caller to stash the context, so we should expect these values here as we didn't stash the @@ -136,7 +136,7 @@ public MethodHandlers next() { public void testCanTripCircuitBreaker() throws Exception { RestController controller = new RestController(Settings.EMPTY, Collections.emptySet(), null, null, circuitBreakerService, - usageService); + usageService); // trip circuit breaker by default controller.registerHandler(RestRequest.Method.GET, "/trip", new FakeRestHandler(true)); controller.registerHandler(RestRequest.Method.GET, "/do-not-trip", new FakeRestHandler(false)); @@ -209,7 +209,7 @@ public void testRestHandlerWrapper() throws Exception { return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true); }; final RestController restController = new RestController(Settings.EMPTY, Collections.emptySet(), wrapper, null, - circuitBreakerService, usageService); + circuitBreakerService, usageService); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); restController.dispatchRequest(new FakeRestRequest.Builder(xContentRegistry()).build(), null, null, Optional.of(handler)); assertTrue(wrapperCalled.get()); @@ -240,7 +240,7 @@ public boolean canTripCircuitBreaker() { public void testDispatchRequestAddsAndFreesBytesOnSuccess() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength(contentLength); - TestRestRequest request = new TestRestRequest("/", content, XContentType.JSON); + RestRequest request = testRestRequest("/", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); @@ -252,7 +252,7 @@ public void testDispatchRequestAddsAndFreesBytesOnSuccess() { public void testDispatchRequestAddsAndFreesBytesOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength(contentLength); - TestRestRequest request = new TestRestRequest("/error", content, XContentType.JSON); + RestRequest request = testRestRequest("/error", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); @@ -265,7 +265,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { int contentLength = BREAKER_LIMIT.bytesAsInt(); String content = randomAlphaOfLength(contentLength); // we will produce an error in the rest handler and one more when sending the error response - TestRestRequest request = new TestRestRequest("/error", content, XContentType.JSON); + RestRequest request = testRestRequest("/error", content, XContentType.JSON); ExceptionThrowingChannel channel = new ExceptionThrowingChannel(request, true); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); @@ -277,7 +277,7 @@ public void testDispatchRequestAddsAndFreesBytesOnlyOnceOnError() { public void testDispatchRequestLimitsBytes() { int contentLength = BREAKER_LIMIT.bytesAsInt() + 1; String content = randomAlphaOfLength(contentLength); - TestRestRequest request = new TestRestRequest("/", content, 
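// The breaker tests around this point pivot on BREAKER_LIMIT; a sketch of the boundary they
// probe, using the testRestRequest(...) helper this diff introduces further down:
//     RestRequest ok       = testRestRequest("/", randomAlphaOfLength(BREAKER_LIMIT.bytesAsInt()), XContentType.JSON);
//     RestRequest tooLarge = testRestRequest("/", randomAlphaOfLength(BREAKER_LIMIT.bytesAsInt() + 1), XContentType.JSON);
// dispatching tooLarge must answer 503 SERVICE_UNAVAILABLE and leave no bytes in flight.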
XContentType.JSON); + RestRequest request = testRestRequest("/", content, XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.SERVICE_UNAVAILABLE); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); @@ -288,11 +288,11 @@ public void testDispatchRequestLimitsBytes() { public void testDispatchRequiresContentTypeForRequestsWithContent() { String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); - TestRestRequest request = new TestRestRequest("/", content, null); + RestRequest request = testRestRequest("/", content, null); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); restController = new RestController( Settings.builder().put(HttpTransportSettings.SETTING_HTTP_CONTENT_TYPE_REQUIRED.getKey(), true).build(), - Collections.emptySet(), null, null, circuitBreakerService, usageService); + Collections.emptySet(), null, null, circuitBreakerService, usageService); restController.registerHandler(RestRequest.Method.GET, "/", (r, c, client) -> c.sendResponse( new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY))); @@ -412,8 +412,8 @@ public boolean supportsContentStream() { public void testNonStreamingXContentCausesErrorResponse() throws IOException { FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) - .withContent(BytesReference.bytes(YamlXContent.contentBuilder().startObject().endObject()), - XContentType.YAML).withPath("/foo").build(); + .withContent(BytesReference.bytes(YamlXContent.contentBuilder().startObject().endObject()), + XContentType.YAML).withPath("/foo").build(); AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.NOT_ACCEPTABLE); restController.registerHandler(RestRequest.Method.GET, "/foo", new RestHandler() { @Override @@ -457,10 +457,10 @@ public void testDispatchBadRequest() { final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).build(); final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); restController.dispatchBadRequest( - fakeRestRequest, - channel, - new ThreadContext(Settings.EMPTY), - randomBoolean() ? new IllegalStateException("bad request") : new Throwable("bad request")); + fakeRestRequest, + channel, + new ThreadContext(Settings.EMPTY), + randomBoolean() ? new IllegalStateException("bad request") : new Throwable("bad request")); assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().content().utf8ToString(), containsString("bad request")); } @@ -495,7 +495,7 @@ protected void doClose() { @Override public BoundTransportAddress boundAddress() { TransportAddress transportAddress = buildNewFakeTransportAddress(); - return new BoundTransportAddress(new TransportAddress[] {transportAddress} ,transportAddress); + return new BoundTransportAddress(new TransportAddress[]{transportAddress}, transportAddress); } @Override @@ -547,35 +547,11 @@ public void sendResponse(RestResponse response) { } } - private static final class TestRestRequest extends RestRequest { - - private final BytesReference content; - - private TestRestRequest(String path, String content, XContentType xContentType) { - super(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, xContentType == null ? 
- Collections.emptyMap() : Collections.singletonMap("Content-Type", Collections.singletonList(xContentType.mediaType()))); - this.content = new BytesArray(content); - } - - @Override - public Method method() { - return Method.GET; - } - - @Override - public String uri() { - return null; - } - - @Override - public boolean hasContent() { - return true; - } - - @Override - public BytesReference content() { - return content; - } - + private static RestRequest testRestRequest(String path, String content, XContentType xContentType) { + FakeRestRequest.Builder builder = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY); + builder.withPath(path); + builder.withContent(new BytesArray(content), xContentType); + return builder.build(); } } + diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index 1b4bbff7322de..3ad9c61de3c8e 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; import java.io.IOException; import java.util.ArrayList; @@ -44,66 +45,66 @@ public class RestRequestTests extends ESTestCase { public void testContentParser() throws IOException { Exception e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", emptyMap()).contentParser()); + contentRestRequest("", emptyMap()).contentParser()); assertEquals("request body is required", e.getMessage()); e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", singletonMap("source", "{}")).contentParser()); + contentRestRequest("", singletonMap("source", "{}")).contentParser()); assertEquals("request body is required", e.getMessage()); - assertEquals(emptyMap(), new ContentRestRequest("{}", emptyMap()).contentParser().map()); + assertEquals(emptyMap(), contentRestRequest("{}", emptyMap()).contentParser().map()); e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", emptyMap(), emptyMap()).contentParser()); + contentRestRequest("", emptyMap(), emptyMap()).contentParser()); assertEquals("request body is required", e.getMessage()); } public void testApplyContentParser() throws IOException { - new ContentRestRequest("", emptyMap()).applyContentParser(p -> fail("Shouldn't have been called")); - new ContentRestRequest("", singletonMap("source", "{}")).applyContentParser(p -> fail("Shouldn't have been called")); + contentRestRequest("", emptyMap()).applyContentParser(p -> fail("Shouldn't have been called")); + contentRestRequest("", singletonMap("source", "{}")).applyContentParser(p -> fail("Shouldn't have been called")); AtomicReference source = new AtomicReference<>(); - new ContentRestRequest("{}", emptyMap()).applyContentParser(p -> source.set(p.map())); + contentRestRequest("{}", emptyMap()).applyContentParser(p -> source.set(p.map())); assertEquals(emptyMap(), source.get()); } public void testContentOrSourceParam() throws IOException { Exception e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", emptyMap()).contentOrSourceParam()); + contentRestRequest("", emptyMap()).contentOrSourceParam()); assertEquals("request body or source parameter is required", e.getMessage()); - assertEquals(new 
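// As these assertions show, a body-less request can still carry content through query
// parameters; the pair contentOrSourceParam() accepts looks like this sketch:
//     params.put("source", "{\"foo\": \"stuff\"}");
//     params.put("source_content_type", "application/json");
// and supplying source without source_content_type trips the IllegalStateException below.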
BytesArray("stuff"), new ContentRestRequest("stuff", emptyMap()).contentOrSourceParam().v2()); + assertEquals(new BytesArray("stuff"), contentRestRequest("stuff", emptyMap()).contentOrSourceParam().v2()); assertEquals(new BytesArray("stuff"), - new ContentRestRequest("stuff", MapBuilder.newMapBuilder() + contentRestRequest("stuff", MapBuilder.newMapBuilder() .put("source", "stuff2").put("source_content_type", "application/json").immutableMap()).contentOrSourceParam().v2()); assertEquals(new BytesArray("{\"foo\": \"stuff\"}"), - new ContentRestRequest("", MapBuilder.newMapBuilder() + contentRestRequest("", MapBuilder.newMapBuilder() .put("source", "{\"foo\": \"stuff\"}").put("source_content_type", "application/json").immutableMap()) .contentOrSourceParam().v2()); e = expectThrows(IllegalStateException.class, () -> - new ContentRestRequest("", MapBuilder.newMapBuilder() + contentRestRequest("", MapBuilder.newMapBuilder() .put("source", "stuff2").immutableMap()).contentOrSourceParam()); assertEquals("source and source_content_type parameters are required", e.getMessage()); } public void testHasContentOrSourceParam() throws IOException { - assertEquals(false, new ContentRestRequest("", emptyMap()).hasContentOrSourceParam()); - assertEquals(true, new ContentRestRequest("stuff", emptyMap()).hasContentOrSourceParam()); - assertEquals(true, new ContentRestRequest("stuff", singletonMap("source", "stuff2")).hasContentOrSourceParam()); - assertEquals(true, new ContentRestRequest("", singletonMap("source", "stuff")).hasContentOrSourceParam()); + assertEquals(false, contentRestRequest("", emptyMap()).hasContentOrSourceParam()); + assertEquals(true, contentRestRequest("stuff", emptyMap()).hasContentOrSourceParam()); + assertEquals(true, contentRestRequest("stuff", singletonMap("source", "stuff2")).hasContentOrSourceParam()); + assertEquals(true, contentRestRequest("", singletonMap("source", "stuff")).hasContentOrSourceParam()); } public void testContentOrSourceParamParser() throws IOException { Exception e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", emptyMap()).contentOrSourceParamParser()); + contentRestRequest("", emptyMap()).contentOrSourceParamParser()); assertEquals("request body or source parameter is required", e.getMessage()); - assertEquals(emptyMap(), new ContentRestRequest("{}", emptyMap()).contentOrSourceParamParser().map()); - assertEquals(emptyMap(), new ContentRestRequest("{}", singletonMap("source", "stuff2")).contentOrSourceParamParser().map()); - assertEquals(emptyMap(), new ContentRestRequest("", MapBuilder.newMapBuilder() + assertEquals(emptyMap(), contentRestRequest("{}", emptyMap()).contentOrSourceParamParser().map()); + assertEquals(emptyMap(), contentRestRequest("{}", singletonMap("source", "stuff2")).contentOrSourceParamParser().map()); + assertEquals(emptyMap(), contentRestRequest("", MapBuilder.newMapBuilder() .put("source", "{}").put("source_content_type", "application/json").immutableMap()).contentOrSourceParamParser().map()); } public void testWithContentOrSourceParamParserOrNull() throws IOException { - new ContentRestRequest("", emptyMap()).withContentOrSourceParamParserOrNull(parser -> assertNull(parser)); - new ContentRestRequest("{}", emptyMap()).withContentOrSourceParamParserOrNull(parser -> assertEquals(emptyMap(), parser.map())); - new ContentRestRequest("{}", singletonMap("source", "stuff2")).withContentOrSourceParamParserOrNull(parser -> + contentRestRequest("", emptyMap()).withContentOrSourceParamParserOrNull(parser -> 
assertNull(parser)); + contentRestRequest("{}", emptyMap()).withContentOrSourceParamParserOrNull(parser -> assertEquals(emptyMap(), parser.map())); + contentRestRequest("{}", singletonMap("source", "stuff2")).withContentOrSourceParamParserOrNull(parser -> assertEquals(emptyMap(), parser.map())); - new ContentRestRequest("", MapBuilder.newMapBuilder().put("source_content_type", "application/json") + contentRestRequest("", MapBuilder.newMapBuilder().put("source_content_type", "application/json") .put("source", "{}").immutableMap()) .withContentOrSourceParamParserOrNull(parser -> assertEquals(emptyMap(), parser.map())); @@ -113,18 +114,18 @@ public void testContentTypeParsing() { for (XContentType xContentType : XContentType.values()) { Map> map = new HashMap<>(); map.put("Content-Type", Collections.singletonList(xContentType.mediaType())); - ContentRestRequest restRequest = new ContentRestRequest("", Collections.emptyMap(), map); + RestRequest restRequest = contentRestRequest("", Collections.emptyMap(), map); assertEquals(xContentType, restRequest.getXContentType()); map = new HashMap<>(); map.put("Content-Type", Collections.singletonList(xContentType.mediaTypeWithoutParameters())); - restRequest = new ContentRestRequest("", Collections.emptyMap(), map); + restRequest = contentRestRequest("", Collections.emptyMap(), map); assertEquals(xContentType, restRequest.getXContentType()); } } public void testPlainTextSupport() { - ContentRestRequest restRequest = new ContentRestRequest(randomAlphaOfLengthBetween(1, 30), Collections.emptyMap(), + RestRequest restRequest = contentRestRequest(randomAlphaOfLengthBetween(1, 30), Collections.emptyMap(), Collections.singletonMap("Content-Type", Collections.singletonList(randomFrom("text/plain", "text/plain; charset=utf-8", "text/plain;charset=utf-8")))); assertNull(restRequest.getXContentType()); @@ -136,7 +137,7 @@ public void testMalformedContentTypeHeader() { RestRequest.ContentTypeHeaderException.class, () -> { final Map> headers = Collections.singletonMap("Content-Type", Collections.singletonList(type)); - new ContentRestRequest("", Collections.emptyMap(), headers); + contentRestRequest("", Collections.emptyMap(), headers); }); assertNotNull(e.getCause()); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); @@ -144,7 +145,7 @@ public void testMalformedContentTypeHeader() { } public void testNoContentTypeHeader() { - ContentRestRequest contentRestRequest = new ContentRestRequest("", Collections.emptyMap(), Collections.emptyMap()); + RestRequest contentRestRequest = contentRestRequest("", Collections.emptyMap(), Collections.emptyMap()); assertNull(contentRestRequest.getXContentType()); } @@ -152,7 +153,7 @@ public void testMultipleContentTypeHeaders() { List headers = new ArrayList<>(randomUnique(() -> randomAlphaOfLengthBetween(1, 16), randomIntBetween(2, 10))); final RestRequest.ContentTypeHeaderException e = expectThrows( RestRequest.ContentTypeHeaderException.class, - () -> new ContentRestRequest("", Collections.emptyMap(), Collections.singletonMap("Content-Type", headers))); + () -> contentRestRequest("", Collections.emptyMap(), Collections.singletonMap("Content-Type", headers))); assertNotNull(e.getCause()); assertThat(e.getCause(), instanceOf((IllegalArgumentException.class))); assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: only one Content-Type header should be provided")); @@ -160,52 +161,64 @@ public void testMultipleContentTypeHeaders() { public void testRequiredContent() { Exception e = 
expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", emptyMap()).requiredContent()); + contentRestRequest("", emptyMap()).requiredContent()); assertEquals("request body is required", e.getMessage()); - assertEquals(new BytesArray("stuff"), new ContentRestRequest("stuff", emptyMap()).requiredContent()); + assertEquals(new BytesArray("stuff"), contentRestRequest("stuff", emptyMap()).requiredContent()); assertEquals(new BytesArray("stuff"), - new ContentRestRequest("stuff", MapBuilder.newMapBuilder() + contentRestRequest("stuff", MapBuilder.newMapBuilder() .put("source", "stuff2").put("source_content_type", "application/json").immutableMap()).requiredContent()); e = expectThrows(ElasticsearchParseException.class, () -> - new ContentRestRequest("", MapBuilder.newMapBuilder() + contentRestRequest("", MapBuilder.newMapBuilder() .put("source", "{\"foo\": \"stuff\"}").put("source_content_type", "application/json").immutableMap()) .requiredContent()); assertEquals("request body is required", e.getMessage()); e = expectThrows(IllegalStateException.class, () -> - new ContentRestRequest("test", null, Collections.emptyMap()).requiredContent()); + contentRestRequest("test", null, Collections.emptyMap()).requiredContent()); assertEquals("unknown content type", e.getMessage()); } + private static RestRequest contentRestRequest(String content, Map params) { + Map> headers = new HashMap<>(); + headers.put("Content-Type", Collections.singletonList("application/json")); + return contentRestRequest(content, params, headers); + } + + private static RestRequest contentRestRequest(String content, Map params, Map> headers) { + FakeRestRequest.Builder builder = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY); + builder.withHeaders(headers); + builder.withContent(new BytesArray(content), null); + builder.withParams(params); + return new ContentRestRequest(builder.build()); + } + private static final class ContentRestRequest extends RestRequest { - private final BytesArray content; - ContentRestRequest(String content, Map params) { - this(content, params, Collections.singletonMap("Content-Type", Collections.singletonList("application/json"))); - } + private final RestRequest restRequest; - ContentRestRequest(String content, Map params, Map> headers) { - super(NamedXContentRegistry.EMPTY, params, "not used by this test", headers); - this.content = new BytesArray(content); + private ContentRestRequest(RestRequest restRequest) { + super(restRequest.getXContentRegistry(), restRequest.params(), restRequest.path(), restRequest.getHeaders(), + restRequest.getHttpRequest(), restRequest.getHttpChannel()); + this.restRequest = restRequest; } @Override - public boolean hasContent() { - return Strings.hasLength(content); + public Method method() { + return restRequest.method(); } @Override - public BytesReference content() { - return content; + public String uri() { + return restRequest.uri(); } @Override - public String uri() { - throw new UnsupportedOperationException("Not used by this test"); + public boolean hasContent() { + return Strings.hasLength(content()); } @Override - public Method method() { - throw new UnsupportedOperationException("Not used by this test"); + public BytesReference content() { + return restRequest.content(); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index 
9417cc092d828..7a7c66d21aada 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -118,7 +118,7 @@ public static void initMockScripts() { SCRIPTS.put("initScriptParams", params -> { Map agg = (Map) params.get("_agg"); Integer initialValue = (Integer)params.get("initialValue"); - ArrayList collector = new ArrayList(); + ArrayList collector = new ArrayList<>(); collector.add(initialValue); agg.put("collector", collector); return agg; @@ -175,7 +175,6 @@ public void testNoDocs() throws IOException { /** * without combine script, the "_aggs" map should contain a list of the size of the number of documents matched */ - @SuppressWarnings("unchecked") public void testScriptedMetricWithoutCombine() throws IOException { try (Directory directory = newDirectory()) { int numDocs = randomInt(100); @@ -190,8 +189,11 @@ public void testScriptedMetricWithoutCombine() throws IOException { ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); assertEquals(AGG_NAME, scriptedMetric.getName()); assertNotNull(scriptedMetric.aggregation()); + @SuppressWarnings("unchecked") Map agg = (Map) scriptedMetric.aggregation(); - assertEquals(numDocs, ((List) agg.get("collector")).size()); + @SuppressWarnings("unchecked") + List list = (List) agg.get("collector"); + assertEquals(numDocs, list.size()); } } } @@ -300,10 +302,9 @@ public void testSelfReferencingAggStateAfterInit() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/31307") public void testSelfReferencingAggStateAfterMap() throws IOException { try (Directory directory = newDirectory()) { - Integer numDocs = randomInt(100); + Integer numDocs = randomIntBetween(1, 100); try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { for (int i = 0; i < numDocs; i++) { indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 06499aa544e9f..c9ca1637b1ade 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1518,9 +1518,9 @@ public void testRenameOnRestore() throws Exception { ensureGreen(); assertAcked(client.admin().indices().prepareAliases() - .addAlias("test-idx-1", "alias-1") - .addAlias("test-idx-2", "alias-2") - .addAlias("test-idx-3", "alias-3") + .addAlias("test-idx-1", "alias-1", false) + .addAlias("test-idx-2", "alias-2", false) + .addAlias("test-idx-3", "alias-3", false) ); logger.info("--> indexing some data"); diff --git a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java index 3e42e3b304e00..e88a9f0a38d2c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/index/RandomCreateIndexGenerator.java @@ -138,6 +138,10 @@ private static Alias randomAlias() { alias.filter("{\"term\":{\"year\":2016}}"); } + if (randomBoolean()) { + alias.writeIndex(randomBoolean()); + } + 
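// With the writeIndex flag now randomized above, a concrete alias carrying it looks like the
// following sketch, mirroring the addAlias(..., false) calls added in SharedClusterSnapshotRestoreIT:
//     Alias alias = new Alias("alias-1");
//     alias.writeIndex(true);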
return alias; } } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index c5b89adfd738e..3fded43d858ed 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -219,6 +219,8 @@ private static String toCamelCase(String s) { // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. .put("protectedterm", Void.class) + // LUCENE-8332 + .put("concatenategraph", Void.class) .immutableMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index d0403736400cd..4d4743156c73d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -19,12 +19,18 @@ package org.elasticsearch.test.rest; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.http.HttpResponse; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; -import java.net.SocketAddress; +import java.net.InetSocketAddress; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -32,45 +38,115 @@ public class FakeRestRequest extends RestRequest { - private final BytesReference content; - private final Method method; - private final SocketAddress remoteAddress; - public FakeRestRequest() { - this(NamedXContentRegistry.EMPTY, new HashMap<>(), new HashMap<>(), null, Method.GET, "/", null); + this(NamedXContentRegistry.EMPTY, new FakeHttpRequest(Method.GET, "", BytesArray.EMPTY, new HashMap<>()), new HashMap<>(), + new FakeHttpChannel(null)); } - private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map> headers, - Map params, BytesReference content, Method method, String path, SocketAddress remoteAddress) { - super(xContentRegistry, params, path, headers); - this.content = content; - this.method = method; - this.remoteAddress = remoteAddress; + private FakeRestRequest(NamedXContentRegistry xContentRegistry, HttpRequest httpRequest, Map params, + HttpChannel httpChannel) { + super(xContentRegistry, params, httpRequest.uri(), httpRequest.getHeaders(), httpRequest, httpChannel); } @Override - public Method method() { - return method; + public boolean hasContent() { + return content() != null; } - @Override - public String uri() { - return rawPath(); - } + private static class FakeHttpRequest implements HttpRequest { - @Override - public boolean hasContent() { - return content != null; - } + private final Method method; + private final String uri; + private final BytesReference content; + private final Map> headers; - @Override - public BytesReference content() { - return content; + private FakeHttpRequest(Method method, String uri, BytesReference content, Map> headers) { + this.method = method; + this.uri = uri; + this.content = 
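// FakeRestRequest is rebuilt here on top of the new HttpRequest/HttpChannel abstraction; a
// usage sketch of its builder, which wires a FakeHttpRequest and FakeHttpChannel underneath:
//     RestRequest request = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
//         .withPath("/_test")
//         .withContent(new BytesArray("{}"), XContentType.JSON)
//         .build();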
content; + this.headers = headers; + } + + @Override + public Method method() { + return method; + } + + @Override + public String uri() { + return uri; + } + + @Override + public BytesReference content() { + return content; + } + + @Override + public Map> getHeaders() { + return headers; + } + + @Override + public List strictCookies() { + return Collections.emptyList(); + } + + @Override + public HttpVersion protocolVersion() { + return HttpVersion.HTTP_1_1; + } + + @Override + public HttpRequest removeHeader(String header) { + headers.remove(header); + return this; + } + + @Override + public HttpResponse createResponse(RestStatus status, BytesReference content) { + Map headers = new HashMap<>(); + return new HttpResponse() { + @Override + public void addHeader(String name, String value) { + headers.put(name, value); + } + + @Override + public boolean containsHeader(String name) { + return headers.containsKey(name); + } + }; + } } - @Override - public SocketAddress getRemoteAddress() { - return remoteAddress; + private static class FakeHttpChannel implements HttpChannel { + + private final InetSocketAddress remoteAddress; + + private FakeHttpChannel(InetSocketAddress remoteAddress) { + this.remoteAddress = remoteAddress; + } + + @Override + public void sendResponse(HttpResponse response, ActionListener listener) { + + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return remoteAddress; + } + + @Override + public void close() { + + } } public static class Builder { @@ -86,7 +162,7 @@ public static class Builder { private Method method = Method.GET; - private SocketAddress address = null; + private InetSocketAddress address = null; public Builder(NamedXContentRegistry xContentRegistry) { this.xContentRegistry = xContentRegistry; @@ -120,15 +196,14 @@ public Builder withMethod(Method method) { return this; } - public Builder withRemoteAddress(SocketAddress address) { + public Builder withRemoteAddress(InetSocketAddress address) { this.address = address; return this; } public FakeRestRequest build() { - return new FakeRestRequest(xContentRegistry, headers, params, content, method, path, address); + FakeHttpRequest fakeHttpRequest = new FakeHttpRequest(method, path, content, headers); + return new FakeRestRequest(xContentRegistry, fakeHttpRequest, params, new FakeHttpChannel(address)); } - } - } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 8cfbf11bd64b7..8697b0bedcdf5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; +import org.elasticsearch.client.HasAttributeNodeSelector; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; @@ -31,6 +32,7 @@ import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import 
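// The parsing changes below add attribute-based node selection alongside the existing
// version ranges; the YAML shape handled (taken from DoSectionTests) is:
//     node_selector:
//         attribute:
//             attr: val
//         version: 5.2.0-6.0.0
// and each attribute entry maps onto a HasAttributeNodeSelector("attr", "val").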
org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; @@ -131,11 +133,10 @@ public static DoSection parse(XContentParser parser) throws IOException { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { selectorName = parser.currentName(); - } else if (token.isValue()) { - NodeSelector newSelector = buildNodeSelector( - parser.getTokenLocation(), selectorName, parser.text()); - nodeSelector = nodeSelector == NodeSelector.ANY ? - newSelector : new ComposeNodeSelector(nodeSelector, newSelector); + } else { + NodeSelector newSelector = buildNodeSelector(selectorName, parser); + nodeSelector = nodeSelector == NodeSelector.ANY ? + newSelector : new ComposeNodeSelector(nodeSelector, newSelector); } } } else if (currentFieldName != null) { // must be part of API call then @@ -368,34 +369,64 @@ private String formatStatusCodeMessage(ClientYamlTestResponse restTestResponse, not(equalTo(409))))); } - private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) { + private static NodeSelector buildNodeSelector(String name, XContentParser parser) throws IOException { switch (name) { + case "attribute": + return parseAttributeValuesSelector(parser); case "version": - Version[] range = SkipSection.parseVersionRange(value); - return new NodeSelector() { - @Override - public void select(Iterable nodes) { - for (Iterator itr = nodes.iterator(); itr.hasNext();) { - Node node = itr.next(); - if (node.getVersion() == null) { - throw new IllegalStateException("expected [version] metadata to be set but got " - + node); - } - Version version = Version.fromString(node.getVersion()); - if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { - itr.remove(); - } + return parseVersionSelector(parser); + default: + throw new XContentParseException(parser.getTokenLocation(), "unknown node_selector [" + name + "]"); + } + } + + private static NodeSelector parseAttributeValuesSelector(XContentParser parser) throws IOException { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new XContentParseException(parser.getTokenLocation(), "expected START_OBJECT"); + } + String key = null; + XContentParser.Token token; + NodeSelector result = NodeSelector.ANY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + key = parser.currentName(); + } else if (token.isValue()) { + NodeSelector newSelector = new HasAttributeNodeSelector(key, parser.text()); + result = result == NodeSelector.ANY ? 
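// Multiple attribute entries fold left into an AND of selectors, so two entries compose as
// the following sketch, and a node must match every listed attribute to survive select(...):
//     new ComposeNodeSelector(new HasAttributeNodeSelector("attr", "val"),
//                             new HasAttributeNodeSelector("attr2", "val2"))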
+ newSelector : new ComposeNodeSelector(result, newSelector); + } else { + throw new XContentParseException(parser.getTokenLocation(), "expected [" + key + "] to be a value"); + } + } + return result; + } + + private static NodeSelector parseVersionSelector(XContentParser parser) throws IOException { + if (false == parser.currentToken().isValue()) { + throw new XContentParseException(parser.getTokenLocation(), "expected [version] to be a value"); + } + Version[] range = SkipSection.parseVersionRange(parser.text()); + return new NodeSelector() { + @Override + public void select(Iterable nodes) { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + Node node = itr.next(); + if (node.getVersion() == null) { + throw new IllegalStateException("expected [version] metadata to be set but got " + + node); + } + Version version = Version.fromString(node.getVersion()); + if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) { + itr.remove(); } } + } - @Override - public String toString() { - return "version between [" + range[0] + "] and [" + range[1] + "]"; - } - }; - default: - throw new IllegalArgumentException("unknown node_selector [" + name + "]"); - } + @Override + public String toString() { + return "version between [" + range[0] + "] and [" + range[1] + "]"; + } + }; } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java index 81fc934ca6d7e..318c70c2933d8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -46,6 +46,7 @@ import java.io.UncheckedIOException; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -94,9 +95,17 @@ public CapturedRequest[] capturedRequests() { * @return the captured requests */ public CapturedRequest[] getCapturedRequestsAndClear() { - CapturedRequest[] capturedRequests = capturedRequests(); - clear(); - return capturedRequests; + List requests = new ArrayList<>(capturedRequests.size()); + capturedRequests.drainTo(requests); + return requests.toArray(new CapturedRequest[0]); + } + + private Map> groupRequestsByTargetNode(Collection requests) { + Map> result = new HashMap<>(); + for (CapturedRequest request : requests) { + result.computeIfAbsent(request.node.getId(), node -> new ArrayList<>()).add(request); + } + return result; } /** @@ -104,16 +113,7 @@ public CapturedRequest[] getCapturedRequestsAndClear() { * Doesn't clear the captured request list. 
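 * Editor's note: the rewritten *AndClear getters in this class rely on
 * BlockingQueue#drainTo, which atomically moves the queued requests into a
 * local list; unlike the old capturedRequests()-then-clear() sequence, a
 * request captured between the two calls can no longer be dropped. A minimal
 * sketch of the pattern, assuming capturedRequests is a
 * BlockingQueue<CapturedRequest>:
 *
 *   List<CapturedRequest> drained = new ArrayList<>(capturedRequests.size());
 *   capturedRequests.drainTo(drained);
 *   return drained.toArray(new CapturedRequest[0]);
 *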
See {@link #clear()} */ public Map> capturedRequestsByTargetNode() { - Map> map = new HashMap<>(); - for (CapturedRequest request : capturedRequests) { - List nodeList = map.get(request.node.getId()); - if (nodeList == null) { - nodeList = new ArrayList<>(); - map.put(request.node.getId(), nodeList); - } - nodeList.add(request); - } - return map; + return groupRequestsByTargetNode(capturedRequests); } /** @@ -125,9 +125,9 @@ public Map> capturedRequestsByTargetNode() { * @return the captured requests grouped by target node */ public Map> getCapturedRequestsByTargetNodeAndClear() { - Map> map = capturedRequestsByTargetNode(); - clear(); - return map; + List requests = new ArrayList<>(capturedRequests.size()); + capturedRequests.drainTo(requests); + return groupRequestsByTargetNode(requests); } /** clears captured requests */ diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 719044cfc81c2..c4c96d9fe2b42 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -511,7 +512,7 @@ public void testParseDoSectionExpectedWarnings() throws Exception { "just one entry this time"))); } - public void testNodeSelector() throws IOException { + public void testNodeSelectorByVersion() throws IOException { parser = createParser(YamlXContent.yamlXContent, "node_selector:\n" + " version: 5.2.0-6.0.0\n" + @@ -541,8 +542,90 @@ public void testNodeSelector() throws IOException { emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector()); } - private Node nodeWithVersion(String version) { - return new Node(new HttpHost("dummy"), null, null, version, null); + private static Node nodeWithVersion(String version) { + return new Node(new HttpHost("dummy"), null, null, version, null, null); + } + + public void testNodeSelectorByAttribute() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " attribute:\n" + + " attr: val\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node hasAttr = nodeWithAttributes(singletonMap("attr", singletonList("val"))); + Node hasAttrWrongValue = nodeWithAttributes(singletonMap("attr", singletonList("notval"))); + Node notHasAttr = nodeWithAttributes(singletonMap("notattr", singletonList("val"))); + { + List nodes = new ArrayList<>(); + nodes.add(hasAttr); + nodes.add(hasAttrWrongValue); + nodes.add(notHasAttr); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(hasAttr), nodes); + } + + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " attribute:\n" + + " attr: val\n" + + " attr2: val2\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSectionWithTwoAttributes = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node hasAttr2 = nodeWithAttributes(singletonMap("attr2", singletonList("val2"))); + Map> bothAttributes = new HashMap<>(); + bothAttributes.put("attr", 
singletonList("val")); + bothAttributes.put("attr2", singletonList("val2")); + Node hasBoth = nodeWithAttributes(bothAttributes); + { + List nodes = new ArrayList<>(); + nodes.add(hasAttr); + nodes.add(hasAttrWrongValue); + nodes.add(notHasAttr); + nodes.add(hasAttr2); + nodes.add(hasBoth); + doSectionWithTwoAttributes.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(hasBoth), nodes); + } + } + + private static Node nodeWithAttributes(Map> attributes) { + return new Node(new HttpHost("dummy"), null, null, null, null, attributes); + } + + public void testNodeSelectorByTwoThings() throws IOException { + parser = createParser(YamlXContent.yamlXContent, + "node_selector:\n" + + " version: 5.2.0-6.0.0\n" + + " attribute:\n" + + " attr: val\n" + + "indices.get_field_mapping:\n" + + " index: test_index" + ); + + DoSection doSection = DoSection.parse(parser); + assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector()); + Node both = nodeWithVersionAndAttributes("5.2.1", singletonMap("attr", singletonList("val"))); + Node badVersion = nodeWithVersionAndAttributes("5.1.1", singletonMap("attr", singletonList("val"))); + Node badAttr = nodeWithVersionAndAttributes("5.2.1", singletonMap("notattr", singletonList("val"))); + List nodes = new ArrayList<>(); + nodes.add(both); + nodes.add(badVersion); + nodes.add(badAttr); + doSection.getApiCallSection().getNodeSelector().select(nodes); + assertEquals(Arrays.asList(both), nodes); + } + + private static Node nodeWithVersionAndAttributes(String version, Map> attributes) { + return new Node(new HttpHost("dummy"), null, null, version, null, attributes); } private void assertJsonEquals(Map actual, String expected) throws IOException { diff --git a/x-pack/build.gradle b/x-pack/build.gradle index 91652b9e15015..6a064ff5b7c64 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -5,14 +5,6 @@ import org.elasticsearch.gradle.precommit.LicenseHeadersTask Project xpackRootProject = project -apply plugin: 'nebula.info-scm' -final String licenseCommit -if (version.endsWith('-SNAPSHOT')) { - licenseCommit = xpackRootProject.scminfo.change ?: "master" // leniency for non git builds -} else { - licenseCommit = "v${version}" -} - subprojects { group = 'org.elasticsearch.plugin' ext.xpackRootProject = xpackRootProject @@ -21,7 +13,7 @@ subprojects { ext.xpackModule = { String moduleName -> xpackProject("plugin:${moduleName}").path } ext.licenseName = 'Elastic License' - ext.licenseUrl = "https://raw.githubusercontent.com/elastic/elasticsearch/${licenseCommit}/licenses/ELASTIC-LICENSE.txt" + ext.licenseUrl = ext.elasticLicenseUrl project.ext.licenseFile = rootProject.file('licenses/ELASTIC-LICENSE.txt') project.ext.noticeFile = xpackRootProject.file('NOTICE.txt') diff --git a/x-pack/docs/en/rest-api/license/start-trial.asciidoc b/x-pack/docs/en/rest-api/license/start-trial.asciidoc index 7754f6feef79c..341c72853fd08 100644 --- a/x-pack/docs/en/rest-api/license/start-trial.asciidoc +++ b/x-pack/docs/en/rest-api/license/start-trial.asciidoc @@ -36,24 +36,6 @@ For more information, see [float] ==== Examples -The following example checks whether you are eligible to start a trial: - -[source,js] ------------------------------------------------------------- -GET _xpack/license/start_trial ------------------------------------------------------------- -// CONSOLE -// TEST[skip:license testing issues] - -Example response: -[source,js] ------------------------------------------------------------- -{ - 
"eligible_to_start_trial": true -} ------------------------------------------------------------- -// NOTCONSOLE - The following example starts a 30-day trial license. The acknowledge parameter is required as you are initiating a license that will expire. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a96de96fd4f44..049089e62cf26 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.license.PostStartTrialAction; import org.elasticsearch.license.PutLicenseAction; import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; @@ -89,7 +90,7 @@ import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction; import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage; import org.elasticsearch.xpack.core.rollup.RollupField; @@ -325,9 +326,9 @@ public List getNamedWriteables() { StartDatafeedAction.DatafeedParams::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, OpenJobAction.JobParams::new), - // ML - Task statuses - new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new), - new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream), + // ML - Task states + new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new), + new NamedWriteableRegistry.Entry(PersistentTaskState.class, DatafeedState.NAME, DatafeedState::fromStream), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING, MachineLearningFeatureSetUsage::new), // monitoring @@ -350,7 +351,8 @@ public List getNamedWriteables() { // rollup new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, RollupJob.NAME, RollupJob::new), - new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new) + new NamedWriteableRegistry.Entry(Task.Status.class, RollupJobStatus.NAME, RollupJobStatus::new), + new NamedWriteableRegistry.Entry(PersistentTaskState.class, RollupJobStatus.NAME, RollupJobStatus::new) ); } @@ -365,9 +367,9 @@ public List getNamedXContent() { StartDatafeedAction.DatafeedParams::fromXContent), new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(OpenJobAction.TASK_NAME), OpenJobAction.JobParams::fromXContent), - // ML - Task statuses - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(JobTaskStatus.NAME), JobTaskStatus::fromXContent), + // ML - Task states + new 
NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(DatafeedState.NAME), DatafeedState::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(JobTaskState.NAME), JobTaskState::fromXContent), // watcher new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(WatcherMetaData.TYPE), WatcherMetaData::fromXContent), @@ -375,8 +377,12 @@ public List getNamedXContent() { new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE), LicensesMetaData::fromXContent), //rollup - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), RollupJob::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), + RollupJob::fromXContent), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), + RollupJobStatus::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(RollupJobStatus.NAME), + RollupJobStatus::fromXContent) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 861f386a90966..5e145306f8c1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.groups.GroupOrJobLookup; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -402,9 +402,9 @@ public void markJobAsDeleted(String jobId, PersistentTasksCustomMetaData tasks, if (allowDeleteOpenJob == false) { PersistentTask jobTask = getJobTask(jobId, tasks); if (jobTask != null) { - JobTaskStatus jobTaskStatus = (JobTaskStatus) jobTask.getStatus(); + JobTaskState jobTaskState = (JobTaskState) jobTask.getState(); throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because the job is " - + ((jobTaskStatus == null) ? JobState.OPENING : jobTaskStatus.getState())); + + ((jobTaskState == null) ? 
JobState.OPENING : jobTaskState.getState())); } } Job.Builder jobBuilder = new Job.Builder(job); @@ -448,7 +448,7 @@ public static PersistentTask getDatafeedTask(String datafeedId, @Nullable Per public static JobState getJobState(String jobId, @Nullable PersistentTasksCustomMetaData tasks) { PersistentTask task = getJobTask(jobId, tasks); if (task != null) { - JobTaskStatus jobTaskState = (JobTaskStatus) task.getStatus(); + JobTaskState jobTaskState = (JobTaskState) task.getState(); if (jobTaskState == null) { return JobState.OPENING; } @@ -460,8 +460,8 @@ public static JobState getJobState(String jobId, @Nullable PersistentTasksCustom public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetaData tasks) { PersistentTask task = getDatafeedTask(datafeedId, tasks); - if (task != null && task.getStatus() != null) { - return (DatafeedState) task.getStatus(); + if (task != null && task.getState() != null) { + return (DatafeedState) task.getState(); } else { // If we haven't started a datafeed then there will be no persistent task, // which is the same as if the datafeed wasn't started diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java index 7343600a6ee37..d894f7b339fe5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedState.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import java.io.IOException; @@ -20,7 +20,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public enum DatafeedState implements Task.Status { +public enum DatafeedState implements PersistentTaskState { STARTED, STOPPED, STARTING, STOPPING; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java similarity index 86% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index de102798d1ca6..d9ab3357319c6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -12,25 +12,25 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.ml.action.OpenJobAction; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import java.io.IOException; import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public class 
JobTaskStatus implements Task.Status { +public class JobTaskState implements PersistentTaskState { public static final String NAME = OpenJobAction.TASK_NAME; private static ParseField STATE = new ParseField("state"); private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); - private static final ConstructingObjectParser PARSER = + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, - args -> new JobTaskStatus((JobState) args[0], (Long) args[1])); + args -> new JobTaskState((JobState) args[0], (Long) args[1])); static { PARSER.declareField(constructorArg(), p -> { @@ -42,7 +42,7 @@ public class JobTaskStatus implements Task.Status { PARSER.declareLong(constructorArg(), ALLOCATION_ID); } - public static JobTaskStatus fromXContent(XContentParser parser) { + public static JobTaskState fromXContent(XContentParser parser) { try { return PARSER.parse(parser, null); } catch (IOException e) { @@ -53,12 +53,12 @@ public static JobTaskStatus fromXContent(XContentParser parser) { private final JobState state; private final long allocationId; - public JobTaskStatus(JobState state, long allocationId) { + public JobTaskState(JobState state, long allocationId) { this.state = Objects.requireNonNull(state); this.allocationId = allocationId; } - public JobTaskStatus(StreamInput in) throws IOException { + public JobTaskState(StreamInput in) throws IOException { state = JobState.fromStream(in); allocationId = in.readLong(); } @@ -100,7 +100,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - JobTaskStatus that = (JobTaskStatus) o; + JobTaskState that = (JobTaskState) o; return state == that.state && Objects.equals(allocationId, that.allocationId); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java index 991f421265ea8..b11dfd476515c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/MlFilter.java @@ -17,11 +17,12 @@ import org.elasticsearch.xpack.core.ml.MlMetaIndex; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Objects; +import java.util.SortedSet; +import java.util.TreeSet; public class MlFilter implements ToXContentObject, Writeable { @@ -53,9 +54,9 @@ private static ObjectParser createParser(boolean ignoreUnknownFie private final String id; private final String description; - private final List items; + private final SortedSet items; - public MlFilter(String id, String description, List items) { + public MlFilter(String id, String description, SortedSet items) { this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null"); this.description = description; this.items = Objects.requireNonNull(items, ITEMS.getPreferredName() + " must not be null"); @@ -68,7 +69,8 @@ public MlFilter(StreamInput in) throws IOException { } else { description = null; } - items = Arrays.asList(in.readStringArray()); + items = new TreeSet<>(); + items.addAll(Arrays.asList(in.readStringArray())); } @Override @@ -103,8 +105,8 @@ public String getDescription() { return description; } - public List getItems() { - return new 
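// Editor's note: a short usage sketch of the new SortedSet semantics,
// mirroring the tests added in MlFilterTests below:
//
//     MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build();
//     filter.getItems();          // iterates in sorted order: a, b, c
//     filter.getItems().add("x"); // throws UnsupportedOperationException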
ArrayList<>(items); + public SortedSet getItems() { + return Collections.unmodifiableSortedSet(items); } @Override @@ -142,7 +144,7 @@ public static class Builder { private String id; private String description; - private List items = Collections.emptyList(); + private SortedSet items = new TreeSet<>(); private Builder() {} @@ -162,12 +164,13 @@ public Builder setDescription(String description) { } public Builder setItems(List items) { - this.items = items; + this.items = new TreeSet<>(); + this.items.addAll(items); return this; } public Builder setItems(String... items) { - this.items = Arrays.asList(items); + setItems(Arrays.asList(items)); return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 86bc95e092ca3..4cbd5a3b4559a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; import java.io.IOException; @@ -30,7 +31,7 @@ * indexer's current position. When the allocated task updates its status, * it is providing a new version of this. */ -public class RollupJobStatus implements Task.Status { +public class RollupJobStatus implements Task.Status, PersistentTaskState { public static final String NAME = "xpack/rollup/job"; private final IndexerState state; @@ -73,7 +74,7 @@ public RollupJobStatus(StreamInput in) throws IOException { currentPosition = in.readBoolean() ? 
new TreeMap<>(in.readMap()) : null; } - public IndexerState getState() { + public IndexerState getIndexerState() { return state; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java index aec5b3a04d255..71424ec507f52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/rest/RestRequestFilter.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.security.rest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -17,7 +16,6 @@ import org.elasticsearch.rest.RestRequest; import java.io.IOException; -import java.net.SocketAddress; import java.util.Map; import java.util.Set; @@ -33,37 +31,15 @@ public interface RestRequestFilter { default RestRequest getFilteredRequest(RestRequest restRequest) throws IOException { Set fields = getFilteredFields(); if (restRequest.hasContent() && fields.isEmpty() == false) { - return new RestRequest(restRequest.getXContentRegistry(), restRequest.params(), restRequest.path(), restRequest.getHeaders()) { + return new RestRequest(restRequest) { private BytesReference filteredBytes = null; - @Override - public Method method() { - return restRequest.method(); - } - - @Override - public String uri() { - return restRequest.uri(); - } - @Override public boolean hasContent() { return true; } - @Nullable - @Override - public SocketAddress getRemoteAddress() { - return restRequest.getRemoteAddress(); - } - - @Nullable - @Override - public SocketAddress getLocalAddress() { - return restRequest.getLocalAddress(); - } - @Override public BytesReference content() { if (filteredBytes == null) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 44fd61e1693ad..796cae375e3a6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -286,14 +286,14 @@ public Map> getTransports(Settings settings, ThreadP @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { Map> transports = new HashMap<>(); filterPlugins(NetworkPlugin.class).stream().forEach(p -> transports.putAll(p.getHttpTransports(settings, threadPool, bigArrays, - circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, dispatcher))); + pageCacheRecycler, circuitBreakerService, xContentRegistry, networkService, dispatcher))); return transports; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java index 78d87b82839a2..9ac6683f004c5 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/MlFilterTests.java @@ -11,10 +11,9 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; +import java.util.TreeSet; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +39,7 @@ public static MlFilter createRandom(String filterId) { } int size = randomInt(10); - List items = new ArrayList<>(size); + TreeSet items = new TreeSet<>(); for (int i = 0; i < size; i++) { items.add(randomAlphaOfLengthBetween(1, 20)); } @@ -58,7 +57,7 @@ protected MlFilter doParseInstance(XContentParser parser) { } public void testNullId() { - NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", Collections.emptyList())); + NullPointerException ex = expectThrows(NullPointerException.class, () -> new MlFilter(null, "", new TreeSet<>())); assertEquals(MlFilter.ID.getPreferredName() + " must not be null", ex.getMessage()); } @@ -88,4 +87,14 @@ public void testLenientParser() throws IOException { MlFilter.LENIENT_PARSER.apply(parser, null); } } + + public void testItemsAreSorted() { + MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build(); + assertThat(filter.getItems(), contains("a", "b", "c")); + } + + public void testGetItemsReturnsUnmodifiable() { + MlFilter filter = MlFilter.builder("foo").setItems("c", "b", "a").build(); + expectThrows(UnsupportedOperationException.class, () -> filter.getItems().add("x")); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 36bcfe92f0075..083d4ce5b1514 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -34,7 +34,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -256,8 +256,8 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen @Override protected void taskOperation(CloseJobAction.Request request, TransportOpenJobAction.JobTask jobTask, ActionListener listener) { - JobTaskStatus taskStatus = new JobTaskStatus(JobState.CLOSING, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, ActionListener.wrap(task -> { + JobTaskState taskState = new JobTaskState(JobState.CLOSING, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(taskState, ActionListener.wrap(task -> { // we need to fork because we are now on a network threadpool and closeJob method may take a while to complete: threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 5de7962169279..e7fb0fe5fb315 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -39,12 +39,12 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -57,7 +57,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; @@ -208,7 +208,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j persistentTasks.findTasks(OpenJobAction.TASK_NAME, task -> node.getId().equals(task.getExecutorNode())); for (PersistentTasksCustomMetaData.PersistentTask assignedTask : assignedTasks) { - JobTaskStatus jobTaskState = (JobTaskStatus) assignedTask.getStatus(); + JobTaskState jobTaskState = (JobTaskState) assignedTask.getState(); JobState jobState; if (jobTaskState == null || // executor node didn't have the chance to set job status to OPENING // previous executor node failed and current executor node didn't have the chance to set job status to OPENING @@ -675,14 +675,14 @@ public void validate(OpenJobAction.JobParams params, ClusterState clusterState) } @Override - protected void nodeOperation(AllocatedPersistentTask task, OpenJobAction.JobParams params, Task.Status status) { + protected void nodeOperation(AllocatedPersistentTask task, OpenJobAction.JobParams params, PersistentTaskState state) { JobTask jobTask = (JobTask) task; jobTask.autodetectProcessManager = autodetectProcessManager; - JobTaskStatus jobStateStatus = (JobTaskStatus) status; + JobTaskState jobTaskState = (JobTaskState) state; // If the job is failed then the Persistent Task Service will // try to restart it on a node restart. Exiting here leaves the // job in the failed state and it must be force closed. - if (jobStateStatus != null && jobStateStatus.getState().isAnyOf(JobState.FAILED, JobState.CLOSING)) { + if (jobTaskState != null && jobTaskState.getState().isAnyOf(JobState.FAILED, JobState.CLOSING)) { return; } @@ -766,8 +766,8 @@ private class JobPredicate implements Predicate persistentTask) { JobState jobState = JobState.CLOSED; if (persistentTask != null) { - JobTaskStatus jobStateStatus = (JobTaskStatus) persistentTask.getStatus(); - jobState = jobStateStatus == null ? 
JobState.OPENING : jobStateStatus.getState(); + JobTaskState jobTaskState = (JobTaskState) persistentTask.getState(); + jobState = jobTaskState == null ? JobState.OPENING : jobTaskState.getState(); PersistentTasksCustomMetaData.Assignment assignment = persistentTask.getAssignment(); // This logic is only appropriate when opening a job, not when reallocating following a failure, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 3d261864ab409..b13ed6d698451 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -274,8 +274,9 @@ public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clu } @Override - protected void nodeOperation(AllocatedPersistentTask allocatedPersistentTask, StartDatafeedAction.DatafeedParams params, - Task.Status status) { + protected void nodeOperation(final AllocatedPersistentTask allocatedPersistentTask, + final StartDatafeedAction.DatafeedParams params, + final PersistentTaskState state) { DatafeedTask datafeedTask = (DatafeedTask) allocatedPersistentTask; datafeedTask.datafeedManager = datafeedManager; datafeedManager.run(datafeedTask, @@ -373,7 +374,7 @@ public boolean test(PersistentTasksCustomMetaData.PersistentTask persistentTa assignment.getExplanation() + "]", RestStatus.TOO_MANY_REQUESTS); return true; } - DatafeedState datafeedState = (DatafeedState) persistentTask.getStatus(); + DatafeedState datafeedState = (DatafeedState) persistentTask.getState(); return datafeedState == DatafeedState.STARTED; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 4b68f74eb1702..faf6aa80b7a6f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -222,10 +222,10 @@ public void onFailure(Exception e) { } @Override - protected void taskOperation(StopDatafeedAction.Request request, TransportStartDatafeedAction.DatafeedTask datafeedTaskTask, + protected void taskOperation(StopDatafeedAction.Request request, TransportStartDatafeedAction.DatafeedTask datafeedTask, ActionListener listener) { - DatafeedState taskStatus = DatafeedState.STOPPING; - datafeedTaskTask.updatePersistentStatus(taskStatus, ActionListener.wrap(task -> { + DatafeedState taskState = DatafeedState.STOPPING; + datafeedTask.updatePersistentTaskState(taskState, ActionListener.wrap(task -> { // we need to fork because we are now on a network threadpool threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(new AbstractRunnable() { @Override @@ -235,7 +235,7 @@ public void onFailure(Exception 
e) { @Override protected void doRun() throws Exception { - datafeedTaskTask.stop("stop_datafeed (api)", request.getStopTimeout()); + datafeedTask.stop("stop_datafeed (api)", request.getStopTimeout()); listener.onResponse(new StopDatafeedAction.Response(true)); } }); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 69acbad20fb2d..338c111401acf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -88,7 +88,7 @@ public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer { Holder holder = new Holder(task, datafeed, datafeedJob, new ProblemTracker(auditor, job.getId()), taskHandler); runningDatafeedsOnThisNode.put(task.getAllocationId(), holder); - task.updatePersistentStatus(DatafeedState.STARTED, new ActionListener>() { + task.updatePersistentTaskState(DatafeedState.STARTED, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { taskRunner.runWhenJobIsOpened(task); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index 0eb57ab79be5d..bebf0f3935d92 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,12 +12,12 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; import java.util.Objects; @@ -64,11 +64,11 @@ private AssignmentFailure checkAssignment() { PriorityFailureCollector priorityFailureCollector = new PriorityFailureCollector(); priorityFailureCollector.add(verifyIndicesActive(datafeed)); - JobTaskStatus taskStatus = null; + JobTaskState jobTaskState = null; JobState jobState = JobState.CLOSED; if (jobTask != null) { - taskStatus = (JobTaskStatus) jobTask.getStatus(); - jobState = taskStatus == null ? JobState.OPENING : taskStatus.getState(); + jobTaskState = (JobTaskState) jobTask.getState(); + jobState = jobTaskState == null ? 
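// Editor's note: this null-to-OPENING defaulting recurs throughout the PR; a
// persistent task can exist before its executor node has published any
// JobTaskState, so a missing state is read as "still opening":
//
//     JobTaskState jobTaskState = (JobTaskState) task.getState();
//     JobState jobState = jobTaskState == null ? JobState.OPENING : jobTaskState.getState();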
JobState.OPENING : jobTaskState.getState(); } if (jobState.isAnyOf(JobState.OPENING, JobState.OPENED) == false) { @@ -78,8 +78,8 @@ private AssignmentFailure checkAssignment() { priorityFailureCollector.add(new AssignmentFailure(reason, true)); } - if (taskStatus != null && taskStatus.isStatusStale(jobTask)) { - String reason = "cannot start datafeed [" + datafeed.getId() + "], job [" + datafeed.getJobId() + "] status is stale"; + if (jobTaskState != null && jobTaskState.isStatusStale(jobTask)) { + String reason = "cannot start datafeed [" + datafeed.getId() + "], job [" + datafeed.getJobId() + "] state is stale"; priorityFailureCollector.add(new AssignmentFailure(reason, true)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index d3a848ef3821f..b6efb688c1797 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; @@ -623,8 +623,8 @@ public Optional jobOpenTime(JobTask jobTask) { } void setJobState(JobTask jobTask, JobState state) { - JobTaskStatus taskStatus = new JobTaskStatus(state, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, new ActionListener>() { + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(jobTaskState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { logger.info("Successfully set job state to [{}] for job [{}]", state, jobTask.getJobId()); @@ -638,8 +638,8 @@ public void onFailure(Exception e) { } void setJobState(JobTask jobTask, JobState state, CheckedConsumer handler) { - JobTaskStatus taskStatus = new JobTaskStatus(state, jobTask.getAllocationId()); - jobTask.updatePersistentStatus(taskStatus, new ActionListener>() { + JobTaskState jobTaskState = new JobTaskState(state, jobTask.getAllocationId()); + jobTask.updatePersistentTaskState(jobTaskState, new ActionListener>() { @Override public void onResponse(PersistentTask persistentTask) { try { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java index 8049b5655d63b..f6fb2db3c9bb9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetadataTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import 
org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobTests; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -363,7 +363,7 @@ public void testGetJobState() { new PersistentTasksCustomMetaData.Assignment("bar", "test assignment")); assertEquals(JobState.OPENING, MlMetadata.getJobState("foo", tasksBuilder.build())); - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId("foo"), new JobTaskStatus(JobState.OPENED, tasksBuilder.getLastAllocationId())); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId("foo"), new JobTaskState(JobState.OPENED, tasksBuilder.getLastAllocationId())); assertEquals(JobState.OPENED, MlMetadata.getJobState("foo", tasksBuilder.build())); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index f1679b8b0b9d1..d65fc1476e75e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -314,7 +314,7 @@ public static void addTask(String datafeedId, long startTime, String nodeId, Dat PersistentTasksCustomMetaData.Builder tasks) { tasks.addTask(MLMetadataField.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new Assignment(nodeId, "test assignment")); - tasks.updateTaskStatus(MLMetadataField.datafeedTaskId(datafeedId), state); + tasks.updateTaskState(MLMetadataField.datafeedTaskId(datafeedId), state); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 6ef2d92d9c7c6..b5a315d9687bb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -42,7 +42,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.Operator; import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -329,7 +329,7 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { assertTrue(result.getExplanation().contains("because node exceeds [2] the maximum number of jobs [2] in opening state")); tasksBuilder = PersistentTasksCustomMetaData.builder(tasks); - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId("job_id6"), null); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId("job_id6"), null); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); @@ -630,7 +630,7 @@ public static void addJobTask(String jobId, String nodeId, JobState jobState, Pe builder.addTask(MlMetadata.jobTaskId(jobId), OpenJobAction.TASK_NAME, new OpenJobAction.JobParams(jobId), new Assignment(nodeId, "test assignment")); if (jobState != null) { - builder.updateTaskStatus(MlMetadata.jobTaskId(jobId), new JobTaskStatus(jobState, 
builder.getLastAllocationId())); + builder.updateTaskState(MlMetadata.jobTaskId(jobId), new JobTaskState(jobState, builder.getLastAllocationId())); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java index a61709be424e8..55a0f4006bcdd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java @@ -31,7 +31,7 @@ public void testValidate() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); tasksBuilder.addTask(MLMetadataField.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams("foo", 0L), new PersistentTasksCustomMetaData.Assignment("node_id", "")); - tasksBuilder.updateTaskStatus(MLMetadataField.datafeedTaskId("foo"), DatafeedState.STARTED); + tasksBuilder.updateTaskState(MLMetadataField.datafeedTaskId("foo"), DatafeedState.STARTED); tasksBuilder.build(); Job job = createDatafeedJob().build(new Date()); @@ -121,6 +121,6 @@ public static void addTask(String datafeedId, long startTime, String nodeId, Dat taskBuilder.addTask(MLMetadataField.datafeedTaskId(datafeedId), StartDatafeedAction.TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), new PersistentTasksCustomMetaData.Assignment(nodeId, "test assignment")); - taskBuilder.updateTaskStatus(MLMetadataField.datafeedTaskId(datafeedId), state); + taskBuilder.updateTaskState(MLMetadataField.datafeedTaskId(datafeedId), state); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index bd722ebf8ef9a..f609f0c8c5ed9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -378,7 +378,7 @@ private static DatafeedTask createDatafeedTask(String datafeedId, long startTime ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; - }).when(task).updatePersistentStatus(any(), any()); + }).when(task).updatePersistentTaskState(any(), any()); return task; } @@ -394,7 +394,7 @@ private DatafeedTask spyDatafeedTask(DatafeedTask task) { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(mock(PersistentTask.class)); return null; - }).when(task).updatePersistentStatus(any(), any()); + }).when(task).updatePersistentTaskState(any(), any()); return task; } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 0fee78611a7bf..96ae3b5ef38b6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import 
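// Editor's note: alongside the Status-to-State terminology shift, the task
// API rename updatePersistentStatus -> updatePersistentTaskState keeps the
// same listener-based call shape; a hedged sketch following the
// TransportStopDatafeedAction hunk earlier in this diff (the failure handler
// is not shown there and is assumed here):
//
//     datafeedTask.updatePersistentTaskState(DatafeedState.STOPPING, ActionListener.wrap(
//         task -> { /* fork off the network thread, then stop the datafeed */ },
//         listener::onFailure));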
org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.junit.Before; @@ -255,20 +255,20 @@ public void testSelectNode_jobTaskStale() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), nodeId, JobState.OPENED, tasksBuilder); // Set to lower allocationId, so job task is stale: - tasksBuilder.updateTaskStatus(MlMetadata.jobTaskId(job.getId()), new JobTaskStatus(JobState.OPENED, 0)); + tasksBuilder.updateTaskState(MlMetadata.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0)); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode(); assertNull(result.getExecutorNode()); - assertEquals("cannot start datafeed [datafeed_id], job [job_id] status is stale", + assertEquals("cannot start datafeed [datafeed_id], job [job_id] state is stale", result.getExplanation()); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated()); assertThat(e.getMessage(), containsString("No node found to start datafeed [datafeed_id], allocation explanation " - + "[cannot start datafeed [datafeed_id], job [job_id] status is stale]")); + + "[cannot start datafeed [datafeed_id], job [job_id] state is stale]")); tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask(job.getId(), "node_id1", JobState.OPENED, tasksBuilder); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index ce47fb0adf80c..e3d67bb0bdb71 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -39,7 +39,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; @@ -211,9 +211,9 @@ public void testDedicatedMlNode() throws Exception { DiscoveryNode node = clusterState.nodes().resolveNode(task.getExecutorNode()); assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - assertEquals(JobState.OPENED, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(JobState.OPENED, jobTaskState.getState()); }); logger.info("stop the only running ml node"); @@ -264,7 +264,7 @@ public void testMaxConcurrentJobAllocations() throws Exception { for (DiscoveryNode node : event.state().nodes()) { Collection> foundTasks = tasks.findTasks(OpenJobAction.TASK_NAME, task -> { - JobTaskStatus jobTaskState = (JobTaskStatus) 
task.getStatus(); + JobTaskState jobTaskState = (JobTaskState) task.getState(); return node.getId().equals(task.getExecutorNode()) && (jobTaskState == null || jobTaskState.isStatusStale(task)); }); @@ -396,9 +396,9 @@ private void assertJobTask(String jobId, JobState expectedState, boolean hasExec assertThat(node.getAttributes(), hasEntry(MachineLearning.ML_ENABLED_NODE_ATTR, "true")); assertThat(node.getAttributes(), hasEntry(MachineLearning.MAX_OPEN_JOBS_NODE_ATTR, "20")); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - assertEquals(expectedState, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(expectedState, jobTaskState.getState()); } else { assertNull(task.getExecutorNode()); } @@ -411,9 +411,9 @@ private CheckedRunnable checkAllJobsAreAssignedAndOpened(int numJobs) assertEquals(numJobs, tasks.taskMap().size()); for (PersistentTask task : tasks.taskMap().values()) { assertNotNull(task.getExecutorNode()); - JobTaskStatus jobTaskStatus = (JobTaskStatus) task.getStatus(); - assertNotNull(jobTaskStatus); - assertEquals(JobState.OPENED, jobTaskStatus.getState()); + JobTaskState jobTaskState = (JobTaskState) task.getState(); + assertNotNull(jobTaskState); + assertEquals(JobState.OPENED, jobTaskState.getState()); } }; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index 17e7b89978e16..f06b73fcd40aa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -58,7 +58,7 @@ public void testCloseFailedJob() throws Exception { assertEquals(1, tasks.taskMap().size()); // now just double check that the first job is still opened: PersistentTasksCustomMetaData.PersistentTask task = tasks.getTask(MlMetadata.jobTaskId("close-failed-job-1")); - assertEquals(JobState.OPENED, ((JobTaskStatus) task.getStatus()).getState()); + assertEquals(JobState.OPENED, ((JobTaskState) task.getState()).getState()); } public void testSingleNode() throws Exception { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java similarity index 53% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java index 7183235b6ff68..4dfd1965804e5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStatusTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java @@ -9,22 +9,22 @@ import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; -public class JobTaskStatusTests extends AbstractSerializingTestCase { +public class JobTaskStateTests extends AbstractSerializingTestCase { @Override - protected JobTaskStatus createTestInstance() { - return new JobTaskStatus(randomFrom(JobState.values()), randomLong()); + protected JobTaskState createTestInstance() { + return new JobTaskState(randomFrom(JobState.values()), randomLong()); } @Override - protected Writeable.Reader instanceReader() { - return JobTaskStatus::new; + protected Writeable.Reader instanceReader() { + return JobTaskState::new; } @Override - protected JobTaskStatus doParseInstance(XContentParser parser) { - return JobTaskStatus.fromXContent(parser); + protected JobTaskState doParseInstance(XContentParser parser) { + return JobTaskState.fromXContent(parser); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index c3e830553a237..fa41cf0918f71 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; @@ -199,7 +199,7 @@ public void testOpenJob() { manager.openJob(jobTask, e -> {}); assertEquals(1, manager.numberOfOpenJobs()); assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); - verify(jobTask).updatePersistentStatus(eq(new JobTaskStatus(JobState.OPENED, 1L)), any()); + verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L)), any()); } public void testOpenJob_exceedMaxNumJobs() { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 425629c248c9c..50b3f21800d06 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; @@ -62,7 +63,7 @@ public RollupJobPersistentTasksExecutor(Settings settings, Client client, Schedu } @Override - protected void nodeOperation(AllocatedPersistentTask 
task, @Nullable RollupJob params, Status status) { + protected void nodeOperation(AllocatedPersistentTask task, @Nullable RollupJob params, PersistentTaskState state) { RollupJobTask rollupJobTask = (RollupJobTask) task; SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(SCHEDULE_NAME + "_" + params.getConfig().getId(), new CronSchedule(params.getConfig().getCron())); @@ -80,7 +81,7 @@ protected AllocatedPersistentTask createTask(long id, String type, String action PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { return new RollupJobTask(id, type, action, parentTaskId, persistentTask.getParams(), - (RollupJobStatus) persistentTask.getStatus(), client, schedulerEngine, threadPool, headers); + (RollupJobStatus) persistentTask.getState(), client, schedulerEngine, threadPool, headers); } } @@ -115,15 +116,15 @@ protected void doNextBulk(BulkRequest request, ActionListener next } @Override - protected void doSaveState(IndexerState state, Map position, Runnable next) { - if (state.equals(IndexerState.ABORTING)) { + protected void doSaveState(IndexerState indexerState, Map position, Runnable next) { + if (indexerState.equals(IndexerState.ABORTING)) { // If we're aborting, just invoke `next` (which is likely an onFailure handler) next.run(); } else { // Otherwise, attempt to persist our state - final RollupJobStatus status = new RollupJobStatus(state, getPosition()); - logger.debug("Updating persistent status of job [" + job.getConfig().getId() + "] to [" + state.toString() + "]"); - updatePersistentStatus(status, ActionListener.wrap(task -> next.run(), exc -> next.run())); + final RollupJobStatus state = new RollupJobStatus(indexerState, getPosition()); + logger.debug("Updating persistent state of job [" + job.getConfig().getId() + "] to [" + indexerState.toString() + "]"); + updatePersistentTaskState(state, ActionListener.wrap(task -> next.run(), exc -> next.run())); } } @@ -148,7 +149,7 @@ protected void onAbort() { private final ThreadPool threadPool; private final RollupIndexer indexer; - RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus status, + RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus state, Client client, SchedulerEngine schedulerEngine, ThreadPool threadPool, Map headers) { super(id, type, action, RollupField.NAME + "_" + job.getConfig().getId(), parentTask, headers); this.job = job; @@ -158,16 +159,17 @@ protected void onAbort() { // If status is not null, we are resuming rather than starting fresh. Map initialPosition = null; IndexerState initialState = IndexerState.STOPPED; - if (status != null) { - logger.debug("We have existing status, setting state to [" + status.getState() + "] " + - "and current position to [" + status.getPosition() + "] for job [" + job.getConfig().getId() + "]"); - if (status.getState().equals(IndexerState.INDEXING)) { + if (state != null) { + final IndexerState existingState = state.getIndexerState(); + logger.debug("We have existing state, setting state to [" + existingState + "] " + + "and current position to [" + state.getPosition() + "] for job [" + job.getConfig().getId() + "]"); + if (existingState.equals(IndexerState.INDEXING)) { /* * If we were indexing, we have to reset back to STARTED otherwise the indexer will be "stuck" thinking * it is indexing but without the actual indexing thread running. 
*/ initialState = IndexerState.STARTED; - } else if (status.getState().equals(IndexerState.ABORTING) || status.getState().equals(IndexerState.STOPPING)) { + } else if (existingState.equals(IndexerState.ABORTING) || existingState.equals(IndexerState.STOPPING)) { // It shouldn't be possible to persist ABORTING, but if for some reason it does, // play it safe and restore the job as STOPPED. An admin will have to clean it up, // but it won't be running, and won't delete itself either. Safest option. @@ -175,9 +177,9 @@ // to restore as STOPEPD initialState = IndexerState.STOPPED; } else { - initialState = status.getState(); + initialState = existingState; } - initialPosition = status.getPosition(); + initialPosition = state.getPosition(); } this.indexer = new ClientRollupPageManager(job, initialState, initialPosition, new ParentTaskAssigningClient(client, new TaskId(getPersistentTaskId()))); @@ -227,20 +229,20 @@ public synchronized void start(ActionListener lis + " state was [" + newState + "]")); return; } - final RollupJobStatus status = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition()); - logger.debug("Updating status for rollup job [" + job.getConfig().getId() + "] to [" + status.getState() + "][" + - status.getPosition() + "]"); - updatePersistentStatus(status, + final RollupJobStatus state = new RollupJobStatus(IndexerState.STARTED, indexer.getPosition()); + logger.debug("Updating state for rollup job [" + job.getConfig().getId() + "] to [" + state.getIndexerState() + "][" + + state.getPosition() + "]"); + updatePersistentTaskState(state, ActionListener.wrap( (task) -> { - logger.debug("Succesfully updated status for rollup job [" + job.getConfig().getId() + "] to [" - + status.getState() + "][" + status.getPosition() + "]"); + logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId() + "] to [" + + state.getIndexerState() + "][" + state.getPosition() + "]"); listener.onResponse(new StartRollupJobAction.Response(true)); }, (exc) -> { listener.onFailure( - new ElasticsearchException("Error while updating status for rollup job [" + job.getConfig().getId() - + "] to [" + status.getState() + "].", exc) + new ElasticsearchException("Error while updating state for rollup job [" + job.getConfig().getId() + + "] to [" + state.getIndexerState() + "].", exc) ); } ) @@ -268,17 +270,17 @@ public synchronized void stop(ActionListener liste case STOPPING: // update the persistent state only if there is no background job running, // otherwise the state is updated by the indexer when the background job detects the STOPPING state.
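// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- NOT part of the patch above. It restates the
// state-restoration rule the RollupJobTask constructor implements when a task
// resumes with persisted state; the helper name `restoreIndexerState` is
// hypothetical (the patch inlines this logic in the constructor).
static IndexerState restoreIndexerState(IndexerState persisted) {
    if (persisted.equals(IndexerState.INDEXING)) {
        // No indexing thread survives a restart, so fall back to STARTED.
        return IndexerState.STARTED;
    } else if (persisted.equals(IndexerState.ABORTING) || persisted.equals(IndexerState.STOPPING)) {
        // Neither state should normally be persisted; restore safely as STOPPED.
        return IndexerState.STOPPED;
    }
    return persisted; // STARTED and STOPPED resume as-is.
}
// ---------------------------------------------------------------------------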
- RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, indexer.getPosition()); - updatePersistentStatus(status, + RollupJobStatus state = new RollupJobStatus(IndexerState.STOPPED, indexer.getPosition()); + updatePersistentTaskState(state, ActionListener.wrap( (task) -> { - logger.debug("Succesfully updated status for rollup job [" + job.getConfig().getId() - + "] to [" + status.getState() + "]"); + logger.debug("Successfully updated state for rollup job [" + job.getConfig().getId() + + "] to [" + state.getIndexerState() + "]"); listener.onResponse(new StopRollupJobAction.Response(true)); }, (exc) -> { - listener.onFailure(new ElasticsearchException("Error while updating status for rollup job [" - + job.getConfig().getId() + "] to [" + status.getState() + "].", exc)); + listener.onFailure(new ElasticsearchException("Error while updating state for rollup job [" + + job.getConfig().getId() + "] to [" + state.getIndexerState() + "].", exc)); }) ); break; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java index ce8bf936d9768..3f930cb42981d 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java @@ -7,10 +7,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; @@ -27,7 +25,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Netty4Plugin; @@ -104,7 +101,7 @@ protected Settings transportClientSettings() { } @Before - public void createIndex() throws Exception { + public void createIndex() { client().admin().indices().prepareCreate("test-1").addMapping("doc", "{\"doc\": {\"properties\": {" + "\"date_histo\": {\"type\": \"date\"}, " + "\"histo\": {\"type\": \"integer\"}, " + @@ -125,7 +122,7 @@ public void createIndex() throws Exception { } } } - BulkResponse response = bulk.get(); + bulk.get(); client().admin().indices().prepareRefresh("test-1").get(); } @@ -195,27 +192,23 @@ public void testIndexPattern() throws Exception { // Make sure it started ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "testIndexPattern"); - if (rollupJobStatus == null) {; + RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); + if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) ||
state.equals(IndexerState.INDEXING)); }, 60, TimeUnit.SECONDS); // And wait for it to finish ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "testIndexPattern"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); }, 60, TimeUnit.SECONDS); @@ -274,23 +267,20 @@ public void testTwoJobsStartStopDeleteOne() throws Exception { // Make sure it started ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job1"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STARTED) || state.equals(IndexerState.INDEXING)); }, 60, TimeUnit.SECONDS); //but not the other task ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job2"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STOPPED)); }, 60, TimeUnit.SECONDS); @@ -301,9 +291,7 @@ public void testTwoJobsStartStopDeleteOne() throws Exception { // Make sure the first job's task is gone ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job1"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); assertTrue(rollupJobStatus == null); }, 60, TimeUnit.SECONDS); @@ -320,10 +308,9 @@ public void testTwoJobsStartStopDeleteOne() throws Exception { // and still STOPPED ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, "job2"); + RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); assertTrue(state.equals(IndexerState.STOPPED)); }, 60, TimeUnit.SECONDS); } @@ -404,19 +391,17 @@ public void testBig() throws Exception { Assert.assertThat(response.isStarted(), equalTo(true)); ESTestCase.assertBusy(() -> { - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, taskId); + RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); if (rollupJobStatus == null) { fail("null"); } - IndexerState state = rollupJobStatus.getState(); + IndexerState state = rollupJobStatus.getIndexerState(); logger.error("state: [" + state + "]"); assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); }, 60, 
TimeUnit.SECONDS); - ListTasksResponse tasksResponse = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - RollupJobStatus rollupJobStatus = getRollupJobStatus(tasksResponse, taskId); + RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); if (rollupJobStatus == null) { Assert.fail("rollup job status should not be null"); } @@ -481,11 +466,13 @@ private void verifyAgg(InternalDateHistogram verify, InternalDateHistogram rollu } } - private RollupJobStatus getRollupJobStatus(ListTasksResponse tasksResponse, String taskId) { - for (TaskInfo task : tasksResponse.getTasks()) { - if (task.getDescription().equals("rollup_" + taskId)) { - return ((RollupJobStatus) task.getStatus()); - } + private RollupJobStatus getRollupJobStatus(final String taskId) { + final GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(taskId); + final GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, request).actionGet(); + + if (response.getJobs() != null && response.getJobs().isEmpty() == false) { + assertThat("Expect 1 rollup job with id " + taskId, response.getJobs().size(), equalTo(1)); + return response.getJobs().iterator().next().getStatus(); } return null; } @@ -498,13 +485,13 @@ public void cleanup() throws ExecutionException, InterruptedException { for (GetRollupJobsAction.JobWrapper job : response.getJobs()) { StopRollupJobAction.Request stopRequest = new StopRollupJobAction.Request(job.getJob().getId()); try { - StopRollupJobAction.Response stopResponse = client().execute(StopRollupJobAction.INSTANCE, stopRequest).get(); + client().execute(StopRollupJobAction.INSTANCE, stopRequest).get(); } catch (ElasticsearchException e) { // } DeleteRollupJobAction.Request deleteRequest = new DeleteRollupJobAction.Request(job.getJob().getId()); - DeleteRollupJobAction.Response deleteResponse = client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); + client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); } } } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index d12be5e6fc196..ffcae267340f8 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; @@ -64,7 +65,7 @@ public void testInitialStatusStopped() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); 
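// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- NOT part of the patch above. It shows how the
// RollupIT assertions consume the new GetRollupJobsAction-based
// getRollupJobStatus(...) helper instead of scraping the generic task list;
// the job id and expected state here are made up for illustration.
ESTestCase.assertBusy(() -> {
    RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); // helper introduced above
    assertNotNull(rollupJobStatus);
    assertEquals(IndexerState.STOPPED, rollupJobStatus.getIndexerState());
}, 60, TimeUnit.SECONDS);
// ---------------------------------------------------------------------------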
} @@ -77,7 +78,7 @@ public void testInitialStatusAborting() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -90,7 +91,7 @@ public void testInitialStatusStopping() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -103,7 +104,7 @@ public void testInitialStatusStarted() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -116,7 +117,7 @@ public void testInitialStatusIndexing() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); } @@ -128,7 +129,7 @@ public void testNoInitialStatus() { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); } @@ -140,7 +141,7 @@ public void testStartWhenStarted() throws InterruptedException { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + 
assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -172,13 +173,14 @@ public void testStartWhenStopping() throws InterruptedException { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); int c = counter.get(); if (c == 0) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); } else if (c == 1) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED)); } else { fail("Should not have updated persistent statuse > 2 times"); } @@ -187,7 +189,7 @@ public void updatePersistentStatus(Status status, ActionListener() { @@ -248,14 +250,15 @@ public void testStartWhenStopped() throws InterruptedException { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -264,7 +267,7 @@ public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); 
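// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- NOT part of the patch above. It condenses the
// test pattern used throughout RollupJobTaskTests after the rename: subclass
// RollupJobTask anonymously and override updatePersistentTaskState(...) to
// capture each persisted RollupJobStatus without touching real cluster state.
RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123),
        job, null, client, schedulerEngine, pool, Collections.emptyMap()) {
    @Override
    public void updatePersistentTaskState(PersistentTaskState taskState,
            ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>> listener) {
        assertThat(taskState, instanceOf(RollupJobStatus.class));
        // Acknowledge the update as the cluster would.
        listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo",
                RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo")));
    }
};
// ---------------------------------------------------------------------------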
assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -301,7 +305,7 @@ public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus)status).getState(), equalTo(IndexerState.STARTED)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); CountDownLatch latch = new CountDownLatch(1); @@ -340,7 +345,7 @@ public void updatePersistentStatus(Status status, ActionListener> listener) { + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } else if (counterValue == 1) { @@ -405,14 +411,14 @@ public void updatePersistentStatus(Status status, ActionListener() { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); started.set(true); } @@ -424,7 +430,7 @@ public void onFailure(Exception e) { ESTestCase.awaitBusy(started::get); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); // Should still be started, not INDEXING + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); // Allow search response to return now latch.countDown(); @@ -475,11 +481,12 @@ public void testTriggerWithHeaders() throws InterruptedException { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { - assertThat(status, instanceOf(RollupJobStatus.class)); - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + 
assertThat(taskState, instanceOf(RollupJobStatus.class)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse(new PersistentTasksCustomMetaData.PersistentTask<>("foo", RollupField.TASK_NAME, job, 1, new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } else if (counterValue == 1) { @@ -488,14 +495,14 @@ public void updatePersistentStatus(Status status, ActionListener() { @Override public void onResponse(StartRollupJobAction.Response response) { assertTrue(response.isStarted()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); started.set(true); } @@ -507,7 +514,7 @@ public void onFailure(Exception e) { ESTestCase.awaitBusy(started::get); task.triggered(new SchedulerEngine.Event(RollupJobTask.SCHEDULE_NAME + "_" + job.getConfig().getId(), 123, 123)); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.INDEXING)); // Should still be started, not INDEXING + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.INDEXING)); assertThat(task.getStats().getNumInvocations(), equalTo(1L)); // Allow search response to return now latch.countDown(); @@ -524,7 +531,7 @@ public void testStopWhenStopped() throws InterruptedException { SchedulerEngine schedulerEngine = new SchedulerEngine(Clock.systemUTC()); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); CountDownLatch latch = new CountDownLatch(1); task.stop(new ActionListener() { @@ -553,15 +560,16 @@ public void testStopWhenStopping() throws InterruptedException { RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override - public void updatePersistentStatus(Status status, ActionListener> listener) { - assertThat(status, instanceOf(RollupJobStatus.class)); + public void updatePersistentTaskState(PersistentTaskState taskState, + ActionListener> listener) { + assertThat(taskState, instanceOf(RollupJobStatus.class)); int c = counter.get(); if (c == 0) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STARTED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); } else if (c == 1) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED)); } else if (c == 2) { - assertThat(((RollupJobStatus) status).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STOPPED)); } else { fail("Should not have updated persistent statuse > 3 times"); } @@ -571,7 +579,7 @@ public void updatePersistentStatus(Status status, ActionListener() { @@ -642,7 +650,7 @@ public void markAsCompleted() { latch.countDown(); } }; - assertThat(((RollupJobStatus)task.getStatus()).getState(), equalTo(IndexerState.STOPPED)); + assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); task.onCancelled(); task.stop(new 
ActionListener() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 664745b19204b..c0bd7882c419a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -843,8 +843,8 @@ public Map> getTransports(Settings settings, ThreadP @Override public Map> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index 1976722d65f36..1991c2685f24e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -69,7 +69,6 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.SocketAddress; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -829,10 +828,9 @@ private Message message(String type, @Nullable String action, @Nullable Authenti msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); } msg.builder.field(Field.ORIGIN_TYPE, "rest"); - SocketAddress address = request.getRemoteAddress(); - if (address instanceof InetSocketAddress) { - msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(((InetSocketAddress) request.getRemoteAddress()) - .getAddress())); + InetSocketAddress address = request.getHttpChannel().getRemoteAddress(); + if (address != null) { + msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(address.getAddress())); } else { msg.builder.field(Field.ORIGIN_ADDRESS, address); } @@ -854,10 +852,9 @@ private Message message(String type, @Nullable Tuple realms, @Nul msg.builder.field(Field.REQUEST_BODY, restRequestContent(request)); } msg.builder.field(Field.ORIGIN_TYPE, "rest"); - SocketAddress address = request.getRemoteAddress(); - if (address instanceof InetSocketAddress) { - msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(((InetSocketAddress) request.getRemoteAddress()) - .getAddress())); + InetSocketAddress address = request.getHttpChannel().getRemoteAddress(); + if (address != null) { + msg.builder.field(Field.ORIGIN_ADDRESS, NetworkAddress.format(address.getAddress())); } else { msg.builder.field(Field.ORIGIN_ADDRESS, address); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index 3b9a42179a577..5706f79011ac5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -38,7 +38,6 @@ import java.net.InetAddress; import java.net.InetSocketAddress; -import 
java.net.SocketAddress; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; @@ -544,13 +543,8 @@ static String subject(Authentication authentication) { } private static String hostAttributes(RestRequest request) { - String formattedAddress; - final SocketAddress socketAddress = request.getRemoteAddress(); - if (socketAddress instanceof InetSocketAddress) { - formattedAddress = NetworkAddress.format(((InetSocketAddress) socketAddress).getAddress()); - } else { - formattedAddress = socketAddress.toString(); - } + final InetSocketAddress socketAddress = request.getHttpChannel().getRemoteAddress(); + String formattedAddress = NetworkAddress.format(socketAddress.getAddress()); return "origin_address=[" + formattedAddress + "]"; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java index dcee6535cf337..ed50a5cfe84e7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/RemoteHostHeader.java @@ -20,7 +20,7 @@ public class RemoteHostHeader { * then be copied to the subsequent action requests. */ public static void process(RestRequest request, ThreadContext threadContext) { - threadContext.putTransient(KEY, request.getRemoteAddress()); + threadContext.putTransient(KEY, request.getHttpChannel().getRemoteAddress()); } /** diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java index 0f4da8b847c58..9109bb37e8c41 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/SecurityRestFilter.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.rest; +import io.netty.channel.Channel; import io.netty.handler.ssl.SslHandler; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -13,7 +14,8 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.http.netty4.Netty4HttpRequest; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.netty4.Netty4HttpChannel; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -50,10 +52,11 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c if (licenseState.isSecurityEnabled() && licenseState.isAuthAllowed() && request.method() != Method.OPTIONS) { // CORS - allow for preflight unauthenticated OPTIONS request if (extractClientCertificate) { - Netty4HttpRequest nettyHttpRequest = (Netty4HttpRequest) request; - SslHandler handler = nettyHttpRequest.getChannel().pipeline().get(SslHandler.class); + HttpChannel httpChannel = request.getHttpChannel(); + Channel nettyChannel = ((Netty4HttpChannel) httpChannel).getNettyChannel(); + SslHandler handler = nettyChannel.pipeline().get(SslHandler.class); assert handler != null; - ServerTransportFilter.extractClientCertificates(logger, threadContext, handler.engine(), 
nettyHttpRequest.getChannel()); + ServerTransportFilter.extractClientCertificates(logger, threadContext, handler.engine(), nettyChannel); } service.authenticate(maybeWrapRestRequest(request), ActionListener.wrap( authentication -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java index 01916b9138031..ac586c4945794 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java @@ -104,7 +104,7 @@ public ChannelHandler configureServerChannelHandler() { private final class HttpSslChannelHandler extends HttpChannelHandler { HttpSslChannelHandler() { - super(SecurityNetty4HttpServerTransport.this, httpHandlingSettings, threadPool.getThreadContext()); + super(SecurityNetty4HttpServerTransport.this, handlingSettings); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index 7878fdb92336a..2e2a931f78f87 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.http.HttpChannel; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestRequest; @@ -914,7 +915,9 @@ public void clearCredentials() { private RestRequest mockRestRequest() { RestRequest request = mock(RestRequest.class); - when(request.getRemoteAddress()).thenReturn(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9200)); + HttpChannel httpChannel = mock(HttpChannel.class); + when(request.getHttpChannel()).thenReturn(httpChannel); + when(httpChannel.getRemoteAddress()).thenReturn(new InetSocketAddress(InetAddress.getLoopbackAddress(), 9200)); when(request.uri()).thenReturn("_uri"); return request; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java index 335673f1c0cbb..127784dcfc0db 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/RestRequestFilterTests.java @@ -88,6 +88,6 @@ public void testRemoteAddressWorks() throws IOException { new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withContent(content, XContentType.JSON) .withRemoteAddress(address).build(); RestRequest filtered = filter.getFilteredRequest(restRequest); - assertEquals(address, filtered.getRemoteAddress()); + assertEquals(address, filtered.getHttpChannel().getRemoteAddress()); } } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java index 2857aee9b61ad..5db634c8d7be9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.http.HttpChannel; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -67,6 +68,7 @@ public void init() throws Exception { public void testProcess() throws Exception { RestRequest request = mock(RestRequest.class); + when(request.getHttpChannel()).thenReturn(mock(HttpChannel.class)); Authentication authentication = mock(Authentication.class); doAnswer((i) -> { ActionListener callback = diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 deleted file mode 100644 index 36bf03bbbdb54..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -003ed080e5184661e606091cd321c229798b22f8 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 new file mode 100644 index 0000000000000..134072bc13701 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-518d303506.jar.sha1 @@ -0,0 +1 @@ +c1bbf611535f0b0fd0ba14e8da67c8d645b95244 \ No newline at end of file diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml index a1f7eee0dcc3d..2b7b86673e0d0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/filter_crud.yml @@ -22,7 +22,7 @@ setup: filter_id: filter-foo body: > { - "items": ["abc", "xyz"] + "items": ["xyz", "abc"] } - do: diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java index f70efc72506d3..9057db476ad77 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeAutodetectIntegTestCase.java @@ -26,13 +26,13 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.sort.SortBuilders; import 
org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.transport.Netty4Plugin; @@ -70,7 +70,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus; +import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -449,8 +449,8 @@ protected void ensureClusterStateConsistency() throws IOException { StartDatafeedAction.DatafeedParams::new)); entries.add(new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME, OpenJobAction.JobParams::new)); - entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new)); - entries.add(new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, JobTaskState.NAME, JobTaskState::new)); + entries.add(new NamedWriteableRegistry.Entry(PersistentTaskState.class, DatafeedState.NAME, DatafeedState::fromStream)); entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetaData.TYPE, TokenMetaData::new)); final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries); ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index c1b238422e92e..ba0f4d5091e0f 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,8 +1,5 @@ --- "Test get old cluster job": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.get_jobs: job_id: old-cluster-job diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 6634722fac977..bb47524b41d87 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -8,9 +8,6 @@ setup: --- "Test open old jobs": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.open_job: job_id: old-cluster-job @@ -77,9 +74,6 @@ setup: --- "Test job with no model memory limit has established model memory after reopening": - - skip: - version: "all" - reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30982" - do: xpack.ml.open_job: job_id: no-model-memory-limit-job
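// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- NOT part of the patch above. It summarizes the
// remote-address pattern the security and audit changes converge on: RestRequest
// no longer hands out a raw SocketAddress, so callers go through the HttpChannel,
// which returns an InetSocketAddress that may be null. The method name
// `originAddress` is hypothetical.
private static String originAddress(RestRequest request) {
    InetSocketAddress address = request.getHttpChannel().getRemoteAddress();
    return address != null ? NetworkAddress.format(address.getAddress()) : null;
}
// ---------------------------------------------------------------------------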