diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 9cc5bb82552ab..85fe712fd8d85 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -744,6 +744,7 @@ class BuildPlugin implements Plugin<Project> { additionalTest.testClassesDir = test.testClassesDir additionalTest.configure(commonTestConfig(project)) additionalTest.configure(config) + additionalTest.dependsOn(project.tasks.testClasses) test.dependsOn(additionalTest) }); return test diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 15a4f21b17543..adacc1863c595 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -225,6 +225,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * warning every time. */ current.println(" - skip:") current.println(" features: ") + current.println(" - default_shards") current.println(" - stash_in_key") current.println(" - stash_in_path") current.println(" - stash_path_replace") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index afbfc747541d7..cd6c7c36ee696 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -147,7 +147,7 @@ class ClusterConfiguration { // map from destination path, to source file Map<String, Object> extraConfigFiles = new HashMap<>() - LinkedHashMap<String, Project> plugins = new LinkedHashMap<>() + LinkedHashMap<String, Object> plugins = new LinkedHashMap<>() List<Project> modules = new ArrayList<>() @@ -185,6 +185,11 @@ class ClusterConfiguration { plugins.put(pluginProject.name, pluginProject) } + @Input + void mavenPlugin(String name, String mavenCoords) { + plugins.put(name, mavenCoords) + } + /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */ @Input void module(Project moduleProject) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index ed066ddc96baa..b9a3839631855 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -99,8 +99,8 @@ class ClusterFormationTasks { // from mirrors using gradles built-in mechanism etc. 
configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion) - for (Map.Entry<String, Project> entry : config.plugins.entrySet()) { - configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(), bwcPlugins, config.bwcVersion) + for (Map.Entry<String, Object> entry : config.plugins.entrySet()) { + configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion) } bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS) @@ -150,10 +150,15 @@ class ClusterFormationTasks { } /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */ - static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, Version elasticsearchVersion) { - verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject) - final String pluginName = findPluginName(pluginProject) - project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") + static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) { + if (plugin instanceof Project) { + Project pluginProject = (Project)plugin + verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject) + final String pluginName = findPluginName(pluginProject) + project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip") + } else { + project.dependencies.add(configuration.name, "${plugin}@zip") + } } /** @@ -210,9 +215,9 @@ class ClusterFormationTasks { } // install plugins - for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) { - String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin') - setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue(), prefix) + for (String pluginName : node.config.plugins.keySet()) { + String actionName = pluginTaskName('install', pluginName, 'Plugin') + setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix) } // sets up any extra config files that need to be copied over to the ES instance; @@ -444,31 +449,40 @@ class ClusterFormationTasks { Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) List<FileCollection> pluginFiles = [] - for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) { + for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) { - Project pluginProject = plugin.getValue() - verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - String configurationName = pluginConfigurationName(prefix, pluginProject) + String configurationName = pluginConfigurationName(prefix, plugin.key) Configuration configuration = project.configurations.findByName(configurationName) if (configuration == null) { configuration = project.configurations.create(configurationName) } - project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip')) - setup.dependsOn(pluginProject.tasks.bundlePlugin) - - // also allow rest tests to use the rest spec from the plugin - String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec') - Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName) - for (File resourceDir : 
pluginProject.sourceSets.test.resources.srcDirs) { - File restApiDir = new File(resourceDir, 'rest-api-spec/api') - if (restApiDir.exists() == false) continue - if (copyRestSpec == null) { - copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy) - copyPlugins.dependsOn(copyRestSpec) - copyRestSpec.into(project.sourceSets.test.output.resourcesDir) + + if (plugin.getValue() instanceof Project) { + Project pluginProject = plugin.getValue() + verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) + + project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip')) + setup.dependsOn(pluginProject.tasks.bundlePlugin) + + // also allow rest tests to use the rest spec from the plugin + String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec') + Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName) + for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) { + File restApiDir = new File(resourceDir, 'rest-api-spec/api') + if (restApiDir.exists() == false) continue + if (copyRestSpec == null) { + copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy) + copyPlugins.dependsOn(copyRestSpec) + copyRestSpec.into(project.sourceSets.test.output.resourcesDir) + } + copyRestSpec.from(resourceDir).include('rest-api-spec/api/**') } - copyRestSpec.from(resourceDir).include('rest-api-spec/api/**') + } else { + project.dependencies.add(configurationName, "${plugin.getValue()}@zip") } + + + pluginFiles.add(configuration) } @@ -477,32 +491,37 @@ class ClusterFormationTasks { return copyPlugins } - private static String pluginConfigurationName(final String prefix, final Project project) { - return "_plugin_${prefix}_${project.path}".replace(':', '_') + private static String pluginConfigurationName(final String prefix, final String name) { + return "_plugin_${prefix}_${name}".replace(':', '_') } - private static String pluginBwcConfigurationName(final String prefix, final Project project) { - return "_plugin_bwc_${prefix}_${project.path}".replace(':', '_') + private static String pluginBwcConfigurationName(final String prefix, final String name) { + return "_plugin_bwc_${prefix}_${name}".replace(':', '_') } /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */ static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) { Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins") - for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) { - Project pluginProject = plugin.getValue() - verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - String configurationName = pluginBwcConfigurationName(prefix, pluginProject) + for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) { + String configurationName = pluginBwcConfigurationName(prefix, plugin.key) Configuration configuration = project.configurations.findByName(configurationName) if (configuration == null) { configuration = project.configurations.create(configurationName) } - final String depName = findPluginName(pluginProject) + if (plugin.getValue() instanceof Project) { + Project pluginProject = plugin.getValue() + verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject) - Dependency dep = bwcPlugins.dependencies.find { - it.name == depName + final String depName = findPluginName(pluginProject) + +
Dependency dep = bwcPlugins.dependencies.find { + it.name == depName + } + configuration.dependencies.add(dep) + } else { + project.dependencies.add(configurationName, "${plugin.getValue()}@zip") } - configuration.dependencies.add(dep) } Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) { @@ -527,12 +546,12 @@ class ClusterFormationTasks { return installModule } - static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin, String prefix) { + static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) { final FileCollection pluginZip; if (node.nodeVersion != VersionProperties.elasticsearch) { - pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, plugin)) + pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName)) } else { - pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, plugin)) + pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName)) } // delay reading the file location until execution time by wrapping in a closure within a GString final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}" diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java index 9210526e7c81c..d32c37dc2c44f 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java @@ -18,27 +18,19 @@ */ package org.elasticsearch.client.benchmark.rest; -import org.apache.http.HttpEntity; import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.conn.ConnectionKeepAliveStrategy; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.benchmark.AbstractBenchmark; import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor; import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -86,9 +78,10 @@ public boolean bulkIndex(List bulkData) { bulkRequestBody.append(bulkItem); bulkRequestBody.append("\n"); } - HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON); + Request request = new Request("POST", "/geonames/type/_noop_bulk"); + request.setJsonEntity(bulkRequestBody.toString()); try { - Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity); + Response response = client.performRequest(request); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch 
(Exception e) { throw new ElasticsearchException(e); @@ -107,9 +100,10 @@ private RestSearchRequestExecutor(RestClient client, String indexName) { @Override public boolean search(String source) { - HttpEntity searchBody = new NStringEntity(source, StandardCharsets.UTF_8); + Request request = new Request("GET", endpoint); + request.setJsonEntity(source); try { - Response response = client.performRequest("GET", endpoint, Collections.emptyMap(), searchBody); + Response response = client.performRequest(request); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (IOException e) { throw new ElasticsearchException(e); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 7f59fcc831213..9782b1016b421 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -194,18 +194,16 @@ public void testBulkProcessorWaitOnClose() throws Exception { } public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { - - String createIndexBody = "{\n" + + Request request = new Request("PUT", "/test-ro"); + request.setJsonEntity("{\n" + " \"settings\" : {\n" + " \"index\" : {\n" + " \"blocks.write\" : true\n" + " }\n" + " }\n" + " \n" + - "}"; - - NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); - Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + "}"); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); int bulkActions = randomIntBetween(10, 100); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index ee820871dbb3d..f384e5706b09a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -19,9 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.DocWriteRequest; @@ -39,6 +36,7 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; @@ -147,11 +145,10 @@ public void testExists() throws IOException { GetRequest getRequest = new GetRequest("index", "type", "id"); assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } - String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, response.getStatusLine().getStatusCode()); + IndexRequest index = new 
IndexRequest("index", "type", "id"); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id"); assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); @@ -175,12 +172,11 @@ public void testGet() throws IOException { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } - + IndexRequest index = new IndexRequest("index", "type", "id"); String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, response.getStatusLine().getStatusCode()); + index.source(document, XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id").version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, @@ -271,18 +267,15 @@ public void testMultiGet() throws IOException { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", response.getResponses()[1].getFailure().getFailure().getMessage()); } - - String document = "{\"field\":\"value1\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id1", Collections.singletonMap("refresh", "true"), - stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - - document = "{\"field\":\"value2\"}"; - stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - + BulkRequest bulk = new BulkRequest(); + bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + IndexRequest index = new IndexRequest("index", "type", "id1"); + index.source("{\"field\":\"value1\"}", XContentType.JSON); + bulk.add(index); + index = new IndexRequest("index", "type", "id2"); + index.source("{\"field\":\"value2\"}", XContentType.JSON); + bulk.add(index); + highLevelClient().bulk(bulk); { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9828041332b32..549b4ce0a85c5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -312,14 +312,14 @@ public void testSearchWithMatrixStats() throws IOException { MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); assertEquals(56d, matrixStats.getMean("num"), 0d); - assertEquals(1830d, matrixStats.getVariance("num"), 0d); - assertEquals(0.09340198804973046, matrixStats.getSkewness("num"), 0d); + assertEquals(1830.0000000000002, matrixStats.getVariance("num"), 0d); + 
assertEquals(0.09340198804973039, matrixStats.getSkewness("num"), 0d); assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d); assertEquals(5, matrixStats.getFieldCount("num2")); assertEquals(29d, matrixStats.getMean("num2"), 0d); assertEquals(330d, matrixStats.getVariance("num2"), 0d); assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 1.0e-16); - assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d); + assertEquals(1.3517561983471071, matrixStats.getKurtosis("num2"), 0d); assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d); assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 9b0b1ab83a460..6641aa2fc7d25 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -19,8 +19,6 @@ package org.elasticsearch.client.documentation; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; @@ -49,6 +47,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; @@ -58,6 +57,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.rest.RestStatus; @@ -271,16 +271,15 @@ public void testUpdate() throws Exception { IndexResponse indexResponse = client.index(indexRequest); assertSame(indexResponse.status(), RestStatus.CREATED); - XContentType xContentType = XContentType.JSON; - String script = Strings.toString(XContentBuilder.builder(xContentType.xContent()) + Request request = new Request("POST", "/_scripts/increment-field"); + request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder() .startObject() .startObject("script") .field("lang", "painless") .field("code", "ctx._source.field += params.count") .endObject() - .endObject()); - HttpEntity body = new NStringEntity(script, ContentType.create(xContentType.mediaType())); - Response response = client().performRequest(HttpPost.METHOD_NAME, "/_scripts/increment-field", emptyMap(), body); + .endObject())); + Response response = client().performRequest(request); assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java index 650ab882c36d2..489d4d9b1ed5f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -66,58 +67,22 @@ * -------------------------------------------------- */ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase { - - public void testCreateIndex() throws IOException { - RestHighLevelClient client = highLevelClient(); - { - //tag::migration-create-index - Settings indexSettings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - - String payload = Strings.toString(XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("settings") // <3> - .value(indexSettings) - .endObject() - .startObject("mappings") // <4> - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject()); - - HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); // <5> - - Response response = client.getLowLevelClient().performRequest("PUT", "my-index", emptyMap(), entity); // <6> - if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { - // <7> - } - //end::migration-create-index - assertEquals(200, response.getStatusLine().getStatusCode()); - } - } - public void testClusterHealth() throws IOException { RestHighLevelClient client = highLevelClient(); { //tag::migration-cluster-health - Map parameters = singletonMap("wait_for_status", "green"); - Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health", parameters); // <1> + Request request = new Request("GET", "/_cluster/health"); + request.addParameter("wait_for_status", "green"); // <1> + Response response = client.getLowLevelClient().performRequest(request); // <2> ClusterHealthStatus healthStatus; - try (InputStream is = response.getEntity().getContent()) { // <2> - Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <3> - healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <4> + try (InputStream is = response.getEntity().getContent()) { // <3> + Map map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4> + healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5> } - if (healthStatus == ClusterHealthStatus.GREEN) { - // <5> + if (healthStatus != ClusterHealthStatus.GREEN) { + // <6> } //end::migration-cluster-health assertSame(ClusterHealthStatus.GREEN, healthStatus); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 4400d05a9f820..6fdc60fcb3394 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -800,7 +800,7 @@ public void testRankEval() throws Exception { double qualityLevel = evalQuality.getQualityLevel(); // <3> assertEquals(1.0 / 3.0, 
qualityLevel, 0.0); List hitsAndRatings = evalQuality.getHitsAndRatings(); - RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); + RatedSearchHit ratedSearchHit = hitsAndRatings.get(2); assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> assertFalse(ratedSearchHit.getRating().isPresent()); // <5> MetricDetail metricDetails = evalQuality.getMetricDetails(); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 5979c508de287..667e38a5167d7 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -351,11 +351,12 @@ private Response bodyTest(final String method) throws IOException { private Response bodyTest(final RestClient restClient, final String method) throws IOException { String requestBody = "{ \"field\": \"value\" }"; - StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + request.setJsonEntity(requestBody); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), entity); + esResponse = restClient.performRequest(request); } catch(ResponseException e) { esResponse = e.getResponse(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 2d419b213d686..714d2e57e6d20 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -58,11 +58,9 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java index c61b736bf6db1..74cc251f52c2f 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java @@ -21,18 +21,22 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.junit.Before; import java.io.IOException; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; /** * Tests for the "Location" header returned when returning {@code 201 CREATED}. 
*/ public class CreatedLocationHeaderIT extends ESRestTestCase { + public void testCreate() throws IOException { locationTestCase("PUT", "test/test/1"); } @@ -54,8 +58,11 @@ public void testUpsert() throws IOException { private void locationTestCase(String method, String url) throws IOException { locationTestCase(client().performRequest(method, url, emptyMap(), new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + // we have to delete the index otherwise the second indexing request will route to the single shard and not produce a 201 + final Response response = client().performRequest(new Request("DELETE", "test")); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); locationTestCase(client().performRequest(method, url + "?routing=cat", emptyMap(), - new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); } private void locationTestCase(Response response) throws IOException { @@ -65,4 +72,5 @@ private void locationTestCase(Response response) throws IOException { Response getResponse = client().performRequest("GET", location); assertEquals(singletonMap("test", "test"), entityAsMap(getResponse).get("_source")); } + } diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 5f7ed63cdd8ad..6eb26fde8f9f8 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -115,6 +115,9 @@ Rollup:: * Validate timezone in range queries to ensure they match the selected job when searching ({pull}30338[#30338]) +SQL:: +* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) + [float] === Regressions Fail snapshot operations early when creating or deleting a snapshot on a repository that has been @@ -160,7 +163,8 @@ analysis module. ({pull}30397[#30397]) [float] === Enhancements -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255]) +{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow +copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404]) Added new "Request" object flavored request methods in the RestClient. Prefer these instead of the multi-argument versions. ({pull}29623[#29623]) @@ -201,6 +205,8 @@ Rollup:: * Validate timezone in range queries to ensure they match the selected job when searching ({pull}30338[#30338]) +SQL:: +* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) Allocation:: @@ -241,6 +247,9 @@ Reduce the number of object allocations made by {security} when resolving the in Respect accept header on requests with no handler ({pull}30383[#30383]) +SQL:: +* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) + //[float] //=== Regressions diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index 1349ccb35fe3b..ad4e0613fc14a 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -2,7 +2,7 @@ == Migration Guide This section describes how to migrate existing code from the `TransportClient` -to the new Java High Level REST Client released with the version 5.6.0 +to the Java High Level REST Client released with the version 5.6.0 of Elasticsearch. === Motivations around a new Java client @@ -107,9 +107,6 @@ More importantly, the high-level client: request constructors like `new IndexRequest()` to create requests objects. 
The requests are then executed using synchronous or asynchronous dedicated methods like `client.index()` or `client.indexAsync()`. -- does not provide indices or cluster management APIs. Management -operations can be executed by external scripts or -<>. ==== How to migrate the way requests are built @@ -241,71 +238,6 @@ returned by the cluster. <4> The `onFailure()` method is called when an error occurs during the execution of the request. -[[java-rest-high-level-migration-manage-indices]] -==== Manage Indices using the Low-Level REST Client - -The low-level client is able to execute any kind of HTTP requests, and can -therefore be used to call the APIs that are not yet supported by the high level client. - -For example, creating a new index with the `TransportClient` may look like this: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - -String mappings = XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .string(); - -CreateIndexResponse response = transportClient.admin().indices() // <3> - .prepareCreate("my-index") - .setSettings(indexSettings) - .addMapping("doc", docMapping, XContentType.JSON) - .get(); - -if (response.isAcknowledged() == false) { - // <4> -} --------------------------------------------------- -<1> Define the settings of the index -<2> Define the mapping for document of type `doc` using a -`XContentBuilder` -<3> Create the index with the previous settings and mapping -using the `prepareCreate()` method. The execution is synchronous -and blocks on the `get()` method until the remote cluster returns -a response. -<4> Handle the situation where the index has not been created - -The same operation executed with the low-level client could be: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-create-index] --------------------------------------------------- -<1> Define the settings of the index -<2> Define the body of the HTTP request using a `XContentBuilder` with JSON format -<3> Include the settings in the request body -<4> Include the mappings in the request body -<5> Convert the request body from `String` to a `HttpEntity` and -set its content type (here, JSON) -<6> Execute the request using the low-level client. The execution is synchronous -and blocks on the `performRequest()` method until the remote cluster returns -a response. The low-level client can be retrieved from an existing `RestHighLevelClient` -instance through the `getLowLevelClient` getter method. -<7> Handle the situation where the index has not been created - - [[java-rest-high-level-migration-cluster-health]] ==== Checking Cluster Health using the Low-Level REST Client @@ -331,18 +263,18 @@ With the low-level client, the code can be changed to: -------------------------------------------------- include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-health] -------------------------------------------------- -<1> Call the cluster's health REST endpoint and wait for the cluster health to become green, -then get back a `Response` object. 
-<2> Retrieve an `InputStream` object in order to read the response's content -<3> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This +<1> Set up the request to wait for the cluster's health to become green if it isn't already. +<2> Make the request and get back a `Response` object. +<3> Retrieve an `InputStream` object in order to read the response's content +<4> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This helper requires the content type of the response to be passed as an argument and returns a `Map` of objects. Values in the map can be of any type, including inner `Map` that are used to represent the JSON object hierarchy. -<4> Retrieve the value of the `status` field in the response map, casts it as a a `String` +<5> Retrieve the value of the `status` field in the response map, casts it as a `String` object and use the `ClusterHealthStatus.fromString()` method to convert it to a `ClusterHealthStatus` object. This method throws an exception if the value does not correspond to a valid cluster health status. -<5> Handle the situation where the cluster's health is not green +<6> Handle the situation where the cluster's health is not green Note that for convenience this example uses Elasticsearch's helpers to parse the JSON response body, but any other JSON parser could have been used instead. diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index e616359e8a818..3805b2e564ca4 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -137,8 +137,8 @@ Possible response: "took": 25, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 59cadf1518eba..7dd5dca61b9e4 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -60,8 +60,8 @@ The response for the above aggregation: "aggregations": { "centroid": { "location": { - "lat": 51.00982963806018, - "lon": 3.9662131061777472 + "lat": 51.009829603135586, + "lon": 3.9662130642682314 }, "count": 6 } @@ -113,8 +113,8 @@ The response for the above aggregation: "doc_count": 3, "centroid": { "location": { - "lat": 52.371655656024814, - "lon": 4.909563297405839 + "lat": 52.371655642054975, + "lon": 4.9095632415264845 }, "count": 3 } @@ -125,7 +125,7 @@ The response for the above aggregation: "centroid": { "location": { "lat": 48.86055548675358, - "lon": 2.3316944623366 + "lon": 2.331694420427084 }, "count": 2 } diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 6e881121a0f67..3da1c60db0552 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -235,8 +235,8 @@ The output from the above is: "timed_out": false, "took": $body.took, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc
b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 3cf1f8403e230..9b6861627be40 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -294,8 +294,8 @@ GET my_index/_search "took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index e2824bb528584..42216a9a0fc14 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -300,11 +300,7 @@ Responds: "indices": { "twitter": { "shards": { - "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "1": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "2": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "3": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "4": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] + "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] } } } diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 3719758ff58e9..a9de182e3c00e 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -16,7 +16,7 @@ Might respond with: [source,txt] -------------------------------------------------- shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 + 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2/.+/ _cat] diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index ca2a1838adb02..5f053edf30866 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -14,7 +14,7 @@ GET /_cat/health?v [source,txt] -------------------------------------------------- epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -1475871424 16:17:04 elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/] // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] @@ -33,7 +33,7 @@ which looks like: [source,txt] -------------------------------------------------- cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 3a50a836d0fdb..2a5b865fefa47 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -18,7 +18,7 @@ Might respond with: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 
260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] @@ -81,7 +81,7 @@ Which looks like: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 88fb18b363745..a4c2c54d8eefd 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -17,8 +17,8 @@ might look like: ["source","txt",subs="attributes,callouts"] -------------------------------------------------- index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound -test 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -test1 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -------------------------------------------------- // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 6cc99a25476d9..87c4e17f452ce 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -3,7 +3,7 @@ The cluster health API allows to get a very simple status on the health of the cluster. For example, on a quiet single node cluster with a single index -with 5 shards and one replica, this: +with one shard and one replica, this: [source,js] -------------------------------------------------- @@ -22,11 +22,11 @@ Returns this: "timed_out" : false, "number_of_nodes" : 1, "number_of_data_nodes" : 1, - "active_primary_shards" : 5, - "active_shards" : 5, + "active_primary_shards" : 1, + "active_shards" : 1, "relocating_shards" : 0, "initializing_shards" : 0, - "unassigned_shards" : 5, + "unassigned_shards" : 1, "delayed_unassigned_shards": 0, "number_of_pending_tasks" : 0, "number_of_in_flight_fetch": 0, diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 937917823f5a6..d684be80c00b8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -95,7 +95,7 @@ Replication is important for two primary reasons: To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact. 
-By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. +By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. @@ -366,11 +366,11 @@ And the response: [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b +yellow open customer 95SQ4TSUT7mWBT7VNHH67A 1 1 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat] -The results of the second command tells us that we now have 1 index named customer and it has 5 primary shards and 1 replica (the defaults) and it contains 0 documents in it. +The results of the second command tell us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it. You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index. Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green. diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index 53164d366cd93..c6b9309fa3240 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -105,12 +105,13 @@ you index a document, it is indexed first on the primary shard, then on all <> of the primary shard. + - By default, an <> has 5 primary shards. You can - specify fewer or more primary shards to scale the number of - <> that your index can handle. + By default, an <> has one primary shard. You can specify + more primary shards to scale the number of <> + that your index can handle. + - You cannot change the number of primary shards in an index, once the - index is created. + You cannot change the number of primary shards in an index, once the index is + created. However, an index can be split into a new index using the + <>. 
+ See also <> diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index 4e12dfd7ecad4..37901cb3abe62 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -78,31 +78,31 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.18232156, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.18232156, "_source": { - "body": "A pair of skis" + "body": "Ski resort" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.18232156, "_source": { - "body": "Ski resort" + "body": "A pair of skis" } } ] @@ -136,20 +136,20 @@ GET index/_search "took": 1, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" } @@ -193,20 +193,20 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" } diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index ed0077a629d7c..54c0c1c1b157c 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -103,9 +103,14 @@ specific index module: `index.auto_expand_replicas`:: - Auto-expand the number of replicas based on the number of available nodes. + Auto-expand the number of replicas based on the number of data nodes in the cluster. Set to a dash delimited lower and upper bound (e.g. `0-5`) or use `all` - for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled). + for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled). + Note that the auto-expanded number of replicas does not take any other allocation + rules into account, such as <>, + <> or <>, + and this can lead to the cluster health becoming `YELLOW` if the applicable rules + prevent all the replicas from being allocated. `index.search.idle.after`:: How long a shard can not receive a search or get request until it's considered diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index db1f7c2fe00a9..8583afc96ab1f 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -106,11 +106,7 @@ which returns something similar to: "num_docs" : 0 } } - ], - "1": ..., - "2": ..., - "3": ..., - "4": ... 
+ ] } } } @@ -120,10 +116,6 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"1": \.\.\./"1": $body.indices.twitter.shards.1/] -// TESTRESPONSE[s/"2": \.\.\./"2": $body.indices.twitter.shards.2/] -// TESTRESPONSE[s/"3": \.\.\./"3": $body.indices.twitter.shards.3/] -// TESTRESPONSE[s/"4": \.\.\./"4": $body.indices.twitter.shards.4/] <1> the `sync id` marker [float] diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 81d79c47472df..34e90e6799d78 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -42,7 +42,7 @@ PUT /my_source_index/_settings } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n/] +// TEST[s/^/PUT my_source_index\n{"settings":{"index.number_of_shards":2}}\n/] <1> Forces the relocation of a copy of each shard to the node with name `shrink_node_name`. See <> for more options. @@ -62,11 +62,20 @@ the following request: [source,js] -------------------------------------------------- -POST my_source_index/_shrink/my_target_index +POST my_source_index/_shrink/my_target_index?copy_settings=true +{ + "settings": { + "index.routing.allocation.require._name": null, <1> + "index.blocks.write": null <2> + } +} -------------------------------------------------- // CONSOLE // TEST[continued] +<1> Clear the allocation requirement copied from the source index. +<2> Clear the index write block copied from the source index. + The above request returns immediately once the target index has been added to the cluster state -- it doesn't wait for the shrink operation to start. @@ -97,7 +106,7 @@ and accepts `settings` and `aliases` parameters for the target index: [source,js] -------------------------------------------------- -POST my_source_index/_shrink/my_target_index +POST my_source_index/_shrink/my_target_index?copy_settings=true { "settings": { "index.number_of_replicas": 1, @@ -110,7 +119,7 @@ POST my_source_index/_shrink/my_target_index } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/] +// TEST[s/^/PUT my_source_index\n{"settings": {"index.number_of_shards":5,"index.blocks.write": true}}\n/] <1> The number of shards in the target index. This must be a factor of the number of shards in the source index. @@ -125,9 +134,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source index are not copied during a shrink operation. With the exception of non-copyable settings, settings from the source index can be copied to the target index by adding the URL -parameter `copy_settings=true` to the request. +parameter `copy_settings=true` to the request. Note that `copy_settings` can not +be set to `false`. 
The parameter `copy_settings` will be removed in 8.0.0 -deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] +deprecated[6.4.0, not copying settings is deprecated, copying settings will be +the default behavior in 7.x] [float] === Monitoring the shrink process diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index 58d34cfd9a705..aaed23459c34b 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -123,7 +123,7 @@ the following request: [source,js] -------------------------------------------------- -POST my_source_index/_split/my_target_index +POST my_source_index/_split/my_target_index?copy_settings=true { "settings": { "index.number_of_shards": 2 @@ -158,7 +158,7 @@ and accepts `settings` and `aliases` parameters for the target index: [source,js] -------------------------------------------------- -POST my_source_index/_split/my_target_index +POST my_source_index/_split/my_target_index?copy_settings=true { "settings": { "index.number_of_shards": 5 <1> @@ -181,9 +181,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source index are not copied during a split operation. With the exception of non-copyable settings, settings from the source index can be copied to the target index by adding the URL -parameter `copy_settings=true` to the request. +parameter `copy_settings=true` to the request. Note that `copy_settings` can not +be set to `false`. The parameter `copy_settings` will be removed in 8.0.0 -deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0] +deprecated[6.4.0, not copying settings is deprecated, copying settings will be +the default behavior in 7.x] [float] === Monitoring the split process diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 723f79c5dc499..3688a0e945414 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -83,31 +83,31 @@ both index and query time. 
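For reference, the response below is produced by an index whose `keyword` field declares a custom normalizer, so the same analysis runs at both index and query time; a sketch of such a mapping follows (the `my_normalizer` name and the exact filter chain are illustrative assumptions):

[source,js]
--------------------------------------------------
PUT index
{
  "settings": {
    "analysis": {
      "normalizer": {
        "my_normalizer": {
          "type": "custom",
          "filter": ["lowercase", "asciifolding"]
        }
      }
    }
  },
  "mappings": {
    "_doc": {
      "properties": {
        "foo": {
          "type": "keyword",
          "normalizer": "my_normalizer"
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

Because lowercasing and ASCII folding are applied to both the indexed terms and the query terms, the documents containing `BÀR` and `bar` match the same query and receive identical scores, as the response shows.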
"took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.47000363, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.47000363, "_source": { - "foo": "bar" + "foo": "BÀR" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.47000363, "_source": { - "foo": "BÀR" + "foo": "bar" } } ] @@ -144,8 +144,8 @@ returns "took": 43, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index b5226b53ba0c7..066d3ce1ac597 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -194,8 +194,8 @@ now returns matches from the new index: "took": 3, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -389,8 +389,8 @@ This results in a response like this: "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -549,8 +549,8 @@ GET /my_queries1/_search "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped": 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index b6e465e34dfd4..0d2661c37b862 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -83,8 +83,8 @@ The above request will yield the following response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -227,8 +227,8 @@ GET /my-index/_search "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -299,7 +299,7 @@ Index response: "failed": 0 }, "result": "created", - "_seq_no" : 0, + "_seq_no" : 1, "_primary_term" : 1 } -------------------------------------------------- @@ -407,8 +407,8 @@ This will yield the following response. 
"took": 7, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -512,8 +512,8 @@ The slightly different response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -608,8 +608,8 @@ The above search request returns a response similar to this: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 35bc17e1f0fac..29b349c3b7adb 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -68,20 +68,20 @@ Response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.5753642, + "max_score": 0.87546873, "hits": [ { "_index": "my-index", "_type": "_doc", "_id": "2", - "_score": 0.5753642, + "_score": 0.87546873, "_source": { "codes": ["def", "ghi"], "required_matches": 2 diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index f1c6cf7c573f9..5c01fa53d45ec 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -37,8 +37,8 @@ tweets from the `twitter` index for a certain user. The result is: { "count" : 1, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 } diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 1a7c45545769a..90ee35afa6172 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -18,7 +18,7 @@ Full example: GET /twitter/_search_shards -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: @@ -103,7 +103,7 @@ And specifying the same request, this time with a routing value: GET /twitter/_search_shards?routing=foo,bar -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index e3101a5dfb438..9f9833bde9d5c 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -177,8 +177,8 @@ returns this response: -------------------------------------------------- { "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, @@ -251,8 +251,8 @@ Which should look like: "took": 6, "timed_out": false, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 2c0c8821355a7..20894e5773a37 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -218,8 +218,8 @@ Response: { "valid": true, "_shards": { - "total": 5, - "successful": 5, + 
"total": 1, + "successful": 1, "failed": 0 }, "explanations": [ @@ -227,31 +227,7 @@ Response: "index": "twitter", "shard": 0, "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 1, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 2, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 3, - "valid": true, - "explanation": "(user:kimchi)^0.8333333" - }, - { - "index": "twitter", - "shard": 4, - "valid": true, - "explanation": "user:kimchy" + "explanation": "(user:kimchi)^0.8333333 user:kimchy" } ] } diff --git a/gradle.properties b/gradle.properties index a8a309f10678f..08b03629ad53a 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,2 +1,2 @@ org.gradle.daemon=false -org.gradle.jvmargs=-Xmx1792m +org.gradle.jvmargs=-Xmx2g diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharMatcher.java similarity index 99% rename from server/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharMatcher.java index b9e70d05bb77b..3d8bb8d275394 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CharMatcher.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharMatcher.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import java.util.HashSet; import java.util.Set; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ClassicTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java similarity index 87% rename from server/src/main/java/org/elasticsearch/index/analysis/ClassicTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java index 11f36dfa17702..e81f6b88d248c 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ClassicTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ClassicTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.ClassicTokenizer; @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; /** * Factory for {@link ClassicTokenizer} @@ -33,7 +34,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; - public ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index a01eb52fdd498..c9b48f0c8650d 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -34,9 +34,11 @@ import org.apache.lucene.analysis.commongrams.CommonGramsFilter; import org.apache.lucene.analysis.core.DecimalDigitFilter; import org.apache.lucene.analysis.core.KeywordTokenizer; +import org.apache.lucene.analysis.core.LetterTokenizer; import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.UpperCaseFilter; +import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.cz.CzechStemFilter; import org.apache.lucene.analysis.de.GermanNormalizationFilter; import org.apache.lucene.analysis.de.GermanStemFilter; @@ -58,17 +60,25 @@ import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenFilter; +import org.apache.lucene.analysis.ngram.NGramTokenizer; +import org.apache.lucene.analysis.path.PathHierarchyTokenizer; +import org.apache.lucene.analysis.pattern.PatternTokenizer; import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter; import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.apache.lucene.analysis.shingle.ShingleFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; import org.apache.lucene.analysis.standard.ClassicFilter; +import org.apache.lucene.analysis.standard.ClassicTokenizer; +import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; +import org.apache.lucene.analysis.th.ThaiTokenizer; import org.apache.lucene.analysis.tr.ApostropheFilter; import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; 
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; @@ -169,6 +179,19 @@ public Map> getTokenizers() { Map> tokenizers = new TreeMap<>(); tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new); tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new); + tokenizers.put("thai", ThaiTokenizerFactory::new); + tokenizers.put("nGram", NGramTokenizerFactory::new); + tokenizers.put("ngram", NGramTokenizerFactory::new); + tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new); + tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); + tokenizers.put("classic", ClassicTokenizerFactory::new); + tokenizers.put("letter", LetterTokenizerFactory::new); + tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); + tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); + tokenizers.put("pattern", PatternTokenizerFactory::new); + tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new); + tokenizers.put("whitespace", WhitespaceTokenizerFactory::new); return tokenizers; } @@ -283,6 +306,16 @@ public List getPreConfiguredTokenFilters() { public List getPreConfiguredTokenizers() { List tokenizers = new ArrayList<>(); tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram", + () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null)); + tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null)); + tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null)); tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() { @Override public String name() { @@ -294,6 +327,13 @@ public TokenStream create(TokenStream tokenStream) { return new LowerCaseFilter(tokenStream); } })); + + // Temporary shim for aliases. 
TODO deprecate after they are moved + tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new, null)); + tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram", + () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null)); + tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null)); + return tokenizers; } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java similarity index 86% rename from server/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java index 8210883b2f8f5..55a527cc792c8 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/EdgeNGramTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; @@ -25,19 +25,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; -import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars; +import static org.elasticsearch.analysis.common.NGramTokenizerFactory.parseTokenChars; public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { private final int minGram; - private final int maxGram; - private final CharMatcher matcher; - - public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java similarity index 84% rename from server/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java index 364c236762391..be98eb73a9cad 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/LetterTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LetterTokenizerFactory.java @@ -17,17 +17,18 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LetterTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class LetterTokenizerFactory extends AbstractTokenizerFactory { - public LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java similarity index 82% rename from server/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java index 16939f0d153a5..8f0c5f759aa64 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java @@ -17,17 +17,19 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent { - public LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java similarity index 95% rename from server/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java index a5774cd9ce363..b67f67cb2fa75 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; import java.lang.reflect.Field; import java.lang.reflect.Modifier; @@ -83,7 +84,7 @@ static CharMatcher parseTokenChars(List characterClasses) { return builder.build(); } - public NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff(); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java similarity index 91% rename from server/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java index 2b686da2f26d6..c877fe6944e5b 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.path.PathHierarchyTokenizer; @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { @@ -35,7 +36,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { private final int skip; private final boolean reverse; - public PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); bufferSize = settings.getAsInt("buffer_size", 1024); String delimiter = settings.get("delimiter"); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java similarity index 88% rename from server/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java index d11d88c085ea4..f850b68ac9829 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PatternTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PatternTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.pattern.PatternTokenizer; @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; import java.util.regex.Pattern; @@ -33,7 +34,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory { private final Pattern pattern; private final int group; - public PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ThaiTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java similarity index 85% rename from server/src/main/java/org/elasticsearch/index/analysis/ThaiTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java index 7f702192f1af3..b76aca42d36ee 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ThaiTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/ThaiTokenizerFactory.java @@ -17,20 +17,21 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.th.ThaiTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; /** * Factory for {@link ThaiTokenizer} */ public class ThaiTokenizerFactory extends AbstractTokenizerFactory { - public ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java similarity index 87% rename from server/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java index 79eb0c604d995..8040c88ea7fa5 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/UAX29URLEmailTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/UAX29URLEmailTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; @@ -25,12 +25,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory { private final int maxTokenLength; - public UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } @@ -41,4 +42,4 @@ public Tokenizer create() { tokenizer.setMaxTokenLength(maxTokenLength); return tokenizer; } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java similarity index 87% rename from server/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java index c71747a596d6b..1f89d4688136f 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactory.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -26,13 +26,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory { static final String MAX_TOKEN_LENGTH = "max_token_length"; private Integer maxTokenLength; - public WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, name, settings); maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/CharMatcherTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharMatcherTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/index/analysis/CharMatcherTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharMatcherTests.java index 31f80a66cdacc..1427e5d84513f 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/CharMatcherTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharMatcherTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.elasticsearch.test.ESTestCase; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index befd26296a5c8..7deadcbcc25f6 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory; import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory; import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory; +import org.elasticsearch.index.analysis.KeywordTokenizerFactory; import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory; import org.elasticsearch.index.analysis.SynonymTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase; @@ -45,6 +46,16 @@ protected Map> getTokenizers() { Map> tokenizers = new TreeMap<>(super.getTokenizers()); tokenizers.put("simplepattern", SimplePatternTokenizerFactory.class); tokenizers.put("simplepatternsplit", SimplePatternSplitTokenizerFactory.class); + tokenizers.put("thai", ThaiTokenizerFactory.class); + tokenizers.put("ngram", NGramTokenizerFactory.class); + tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class); + tokenizers.put("classic", ClassicTokenizerFactory.class); + tokenizers.put("letter", LetterTokenizerFactory.class); + tokenizers.put("lowercase", LowerCaseTokenizerFactory.class); + tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class); + tokenizers.put("pattern", PatternTokenizerFactory.class); + tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class); + tokenizers.put("whitespace", WhitespaceTokenizerFactory.class); return tokenizers; } @@ -211,10 +222,25 @@ protected Map> getPreConfiguredTokenFilters() { @Override protected Map> getPreConfiguredTokenizers() { - Map> filters = new TreeMap<>(super.getPreConfiguredTokenizers()); - filters.put("keyword", null); - filters.put("lowercase", null); - return filters; + Map> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers()); + tokenizers.put("keyword", null); + tokenizers.put("lowercase", null); + tokenizers.put("classic", null); + tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class); + tokenizers.put("path_hierarchy", null); + tokenizers.put("letter", null); + tokenizers.put("whitespace", null); + tokenizers.put("ngram", null); + tokenizers.put("edge_ngram", null); + tokenizers.put("pattern", null); + tokenizers.put("thai", null); + + // TODO drop aliases once they are moved to module + tokenizers.put("nGram", tokenizers.get("ngram")); + tokenizers.put("edgeNGram", tokenizers.get("edge_ngram")); + tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy")); + + return tokenizers; } /** diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonGramsTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonGramsTokenFilterFactoryTests.java index 8efc0d5941f9e..2453ecd1e7f12 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonGramsTokenFilterFactoryTests.java +++ 
b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonGramsTokenFilterFactoryTests.java @@ -45,7 +45,7 @@ public void testDefault() throws IOException { .build(); try { - AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin()); Assert.fail("[common_words] or [common_words_path] is set"); } catch (IllegalArgumentException e) { } catch (IOException e) { diff --git a/server/src/test/java/org/elasticsearch/index/query/DisableGraphQueryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/DisableGraphQueryTests.java similarity index 85% rename from server/src/test/java/org/elasticsearch/index/query/DisableGraphQueryTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/DisableGraphQueryTests.java index 30ecb9034354e..d1792e94f7331 100644 --- a/server/src/test/java/org/elasticsearch/index/query/DisableGraphQueryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/DisableGraphQueryTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.query; +package org.elasticsearch.analysis.common; import org.apache.lucene.index.Term; import org.apache.lucene.search.Query; @@ -29,12 +29,22 @@ import org.apache.lucene.search.MultiPhraseQuery; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.QueryStringQueryBuilder; +import org.elasticsearch.index.query.SimpleQueryStringBuilder; +import org.elasticsearch.index.query.SimpleQueryStringFlag; import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; import static org.hamcrest.Matchers.equalTo; @@ -49,6 +59,11 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase { private static Query expectedQueryWithUnigram; private static Query expectedPhraseQueryWithUnigram; + @Override + protected Collection> getPlugins() { + return Collections.singleton(CommonAnalysisPlugin.class); + } + @Before public void setup() { Settings settings = Settings.builder() @@ -150,42 +165,42 @@ public void cleanup() { public void testMatchPhraseQuery() throws IOException { MatchPhraseQueryBuilder builder = new MatchPhraseQueryBuilder("text_shingle_unigram", "foo bar baz"); - Query query = builder.doToQuery(shardContext); + Query query = builder.toQuery(shardContext); assertThat(expectedPhraseQueryWithUnigram, equalTo(query)); builder = new MatchPhraseQueryBuilder("text_shingle", "foo bar baz biz"); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQuery, equalTo(query)); } public void testMatchQuery() throws IOException { MatchQueryBuilder builder = new MatchQueryBuilder("text_shingle_unigram", "foo bar baz"); - Query query = builder.doToQuery(shardContext); + Query query = builder.toQuery(shardContext); assertThat(expectedQueryWithUnigram, equalTo(query)); builder = new MatchQueryBuilder("text_shingle", "foo bar baz biz"); - query 
= builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedQuery, equalTo(query)); } public void testMultiMatchQuery() throws IOException { MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar baz", "text_shingle_unigram"); - Query query = builder.doToQuery(shardContext); + Query query = builder.toQuery(shardContext); assertThat(expectedQueryWithUnigram, equalTo(query)); builder.type(MatchQuery.Type.PHRASE); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQueryWithUnigram, equalTo(query)); builder = new MultiMatchQueryBuilder("foo bar baz biz", "text_shingle"); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedQuery, equalTo(query)); builder.type(MatchQuery.Type.PHRASE); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQuery, equalTo(query)); } @@ -193,47 +208,47 @@ public void testSimpleQueryString() throws IOException { SimpleQueryStringBuilder builder = new SimpleQueryStringBuilder("foo bar baz"); builder.field("text_shingle_unigram"); builder.flags(SimpleQueryStringFlag.NONE); - Query query = builder.doToQuery(shardContext); + Query query = builder.toQuery(shardContext); assertThat(expectedQueryWithUnigram, equalTo(query)); builder = new SimpleQueryStringBuilder("\"foo bar baz\""); builder.field("text_shingle_unigram"); builder.flags(SimpleQueryStringFlag.PHRASE); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQueryWithUnigram, equalTo(query)); builder = new SimpleQueryStringBuilder("foo bar baz biz"); builder.field("text_shingle"); builder.flags(SimpleQueryStringFlag.NONE); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedQuery, equalTo(query)); builder = new SimpleQueryStringBuilder("\"foo bar baz biz\""); builder.field("text_shingle"); builder.flags(SimpleQueryStringFlag.PHRASE); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQuery, equalTo(query)); } public void testQueryString() throws IOException { QueryStringQueryBuilder builder = new QueryStringQueryBuilder("foo bar baz"); builder.field("text_shingle_unigram"); - Query query = builder.doToQuery(shardContext); + Query query = builder.toQuery(shardContext); assertThat(expectedQueryWithUnigram, equalTo(query)); builder = new QueryStringQueryBuilder("\"foo bar baz\""); builder.field("text_shingle_unigram"); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQueryWithUnigram, equalTo(query)); builder = new QueryStringQueryBuilder("foo bar baz biz"); builder.field("text_shingle"); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedQuery, equalTo(query)); builder = new QueryStringQueryBuilder("\"foo bar baz biz\""); builder.field("text_shingle"); - query = builder.doToQuery(shardContext); + query = builder.toQuery(shardContext); assertThat(expectedPhraseQuery, equalTo(query)); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 3c6250eacfa66..078e0a9cb9ed3 100644 --- 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -30,8 +30,6 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory; -import org.elasticsearch.index.analysis.NGramTokenizerFactory; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactoryTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactoryTests.java index 39b96a2cae454..0b545d3355201 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PathHierarchyTokenizerFactoryTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import com.carrotsearch.randomizedtesting.generators.RandomPicks; diff --git a/server/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java similarity index 95% rename from server/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java index 36c9dee10919f..b66f0e1a7f13a 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/SynonymsAnalysisTests.java @@ -17,15 +17,13 @@ * under the License. */ -package org.elasticsearch.index.analysis.synonyms; +package org.elasticsearch.analysis.common; -import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; @@ -44,7 +42,6 @@ import static org.hamcrest.Matchers.startsWith; public class SynonymsAnalysisTests extends ESTestCase { - protected final Logger logger = Loggers.getLogger(getClass()); private IndexAnalyzers indexAnalyzers; public void testSynonymsAnalysis() throws IOException { @@ -56,14 +53,14 @@ public void testSynonymsAnalysis() throws IOException { Files.copy(synonyms, config.resolve("synonyms.txt")); Files.copy(synonymsWordnet, config.resolve("synonyms_wordnet.txt")); - String json = "/org/elasticsearch/index/analysis/synonyms/synonyms.json"; + String json = "/org/elasticsearch/analysis/common/synonyms.json"; Settings settings = Settings.builder(). 
loadFromStream(json, getClass().getResourceAsStream(json), false) .put(Environment.PATH_HOME_SETTING.getKey(), home) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); - indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers; + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!"); match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!"); @@ -91,7 +88,7 @@ public void testSynonymWordDeleteByAnalyzer() throws IOException { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); try { - indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers; + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; fail("fail! due to synonym word deleted by analyzer"); } catch (Exception e) { assertThat(e, instanceOf(IllegalArgumentException.class)); @@ -112,7 +109,7 @@ public void testExpandSynonymWordDeleteByAnalyzer() throws IOException { .build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); try { - indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers; + indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers; fail("fail! due to synonym word deleted by analyzer"); } catch (Exception e) { assertThat(e, instanceOf(IllegalArgumentException.class)); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactoryTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactoryTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactoryTests.java index 6dbb5e174b145..f34b694fbf60f 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/WhitespaceTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WhitespaceTokenizerFactoryTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import com.carrotsearch.randomizedtesting.generators.RandomStrings; diff --git a/server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json b/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms.json similarity index 100% rename from server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.json rename to modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms.json diff --git a/server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.txt b/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms.txt similarity index 100% rename from server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms.txt rename to modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms.txt diff --git a/server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt b/modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms_wordnet.txt similarity index 100% rename from server/src/test/resources/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt rename to modules/analysis-common/src/test/resources/org/elasticsearch/analysis/common/synonyms_wordnet.txt diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml index e6b69db8a0eb9..cffd4496f1fb7 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/30_tokenizers.yml @@ -70,3 +70,374 @@ - match: { detail.tokenizer.name: _anonymous_tokenizer } - match: { detail.tokenizer.tokens.0.token: foo } - match: { detail.tokenizer.tokens.1.token: bar } + +--- +"thai_tokenizer": + - do: + indices.analyze: + body: + text: "ภาษาไทย" + explain: true + tokenizer: + type: thai + - length: { detail.tokenizer.tokens: 2 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: ภาษา } + - match: { detail.tokenizer.tokens.1.token: ไทย } + + - do: + indices.analyze: + body: + text: "ภาษาไทย" + explain: true + tokenizer: thai + - length: { detail.tokenizer.tokens: 2 } + - match: { detail.tokenizer.name: thai } + - match: { detail.tokenizer.tokens.0.token: ภาษา } + - match: { detail.tokenizer.tokens.1.token: ไทย } + +--- +"ngram": + - do: + indices.analyze: + body: + text: "foobar" + explain: true + tokenizer: + type: ngram + min_gram: 3 + max_gram: 3 + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: foo } + - match: { detail.tokenizer.tokens.1.token: oob } + - match: { detail.tokenizer.tokens.2.token: oba } + - match: { detail.tokenizer.tokens.3.token: bar } + + - do: + indices.analyze: + body: + text: "foobar" + explain: true + tokenizer: + type: nGram + min_gram: 3 + max_gram: 3 + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: foo } + - match: { detail.tokenizer.tokens.1.token: oob } + - match: { detail.tokenizer.tokens.2.token: oba } + - match: { detail.tokenizer.tokens.3.token: bar } + + - do: + indices.analyze: + 
body: + text: "foo" + explain: true + tokenizer: ngram + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: ngram } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + - match: { detail.tokenizer.tokens.2.token: o } + - match: { detail.tokenizer.tokens.3.token: oo } + - match: { detail.tokenizer.tokens.4.token: o } + + - do: + indices.analyze: + body: + text: "foo" + explain: true + tokenizer: nGram + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: nGram } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + - match: { detail.tokenizer.tokens.2.token: o } + - match: { detail.tokenizer.tokens.3.token: oo } + - match: { detail.tokenizer.tokens.4.token: o } + +--- +"edge_ngram": + - do: + indices.analyze: + body: + text: "foo" + explain: true + tokenizer: + type: edge_ngram + min_gram: 1 + max_gram: 3 + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + - match: { detail.tokenizer.tokens.2.token: foo } + + - do: + indices.analyze: + body: + text: "foo" + explain: true + tokenizer: + type: edgeNGram + min_gram: 1 + max_gram: 3 + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + - match: { detail.tokenizer.tokens.2.token: foo } + + - do: + indices.analyze: + body: + text: "foo" + explain: true + tokenizer: edge_ngram + - length: { detail.tokenizer.tokens: 2 } + - match: { detail.tokenizer.name: edge_ngram } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + + - do: + indices.analyze: + body: + text: "foo" + explain: true + tokenizer: edgeNGram + - length: { detail.tokenizer.tokens: 2 } + - match: { detail.tokenizer.name: edgeNGram } + - match: { detail.tokenizer.tokens.0.token: f } + - match: { detail.tokenizer.tokens.1.token: fo } + +--- +"classic": + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." + explain: true + tokenizer: + type: classic + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: Brown } + - match: { detail.tokenizer.tokens.1.token: Foxes } + - match: { detail.tokenizer.tokens.2.token: don't } + - match: { detail.tokenizer.tokens.3.token: jump } + + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." + explain: true + tokenizer: classic + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: classic } + - match: { detail.tokenizer.tokens.0.token: Brown } + - match: { detail.tokenizer.tokens.1.token: Foxes } + - match: { detail.tokenizer.tokens.2.token: don't } + - match: { detail.tokenizer.tokens.3.token: jump } + +--- +"letter": + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." 
+ explain: true + tokenizer: + type: letter + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: Brown } + - match: { detail.tokenizer.tokens.1.token: Foxes } + - match: { detail.tokenizer.tokens.2.token: don } + - match: { detail.tokenizer.tokens.3.token: t } + - match: { detail.tokenizer.tokens.4.token: jump } + + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." + explain: true + tokenizer: letter + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: letter } + - match: { detail.tokenizer.tokens.0.token: Brown } + - match: { detail.tokenizer.tokens.1.token: Foxes } + - match: { detail.tokenizer.tokens.2.token: don } + - match: { detail.tokenizer.tokens.3.token: t } + - match: { detail.tokenizer.tokens.4.token: jump } + +--- +"lowercase": + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." + explain: true + tokenizer: + type: lowercase + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: brown } + - match: { detail.tokenizer.tokens.1.token: foxes } + - match: { detail.tokenizer.tokens.2.token: don } + - match: { detail.tokenizer.tokens.3.token: t } + - match: { detail.tokenizer.tokens.4.token: jump } + + - do: + indices.analyze: + body: + text: "Brown-Foxes don't jump." + explain: true + tokenizer: lowercase + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: lowercase } + - match: { detail.tokenizer.tokens.0.token: brown } + - match: { detail.tokenizer.tokens.1.token: foxes } + - match: { detail.tokenizer.tokens.2.token: don } + - match: { detail.tokenizer.tokens.3.token: t } + - match: { detail.tokenizer.tokens.4.token: jump } + +--- +"path_hierarchy": + - do: + indices.analyze: + body: + text: "a/b/c" + explain: true + tokenizer: + type: path_hierarchy + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: a } + - match: { detail.tokenizer.tokens.1.token: a/b } + - match: { detail.tokenizer.tokens.2.token: a/b/c } + + - do: + indices.analyze: + body: + text: "a/b/c" + explain: true + tokenizer: + type: PathHierarchy + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: a } + - match: { detail.tokenizer.tokens.1.token: a/b } + - match: { detail.tokenizer.tokens.2.token: a/b/c } + + - do: + indices.analyze: + body: + text: "a/b/c" + explain: true + tokenizer: path_hierarchy + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: path_hierarchy } + - match: { detail.tokenizer.tokens.0.token: a } + - match: { detail.tokenizer.tokens.1.token: a/b } + - match: { detail.tokenizer.tokens.2.token: a/b/c } + + - do: + indices.analyze: + body: + text: "a/b/c" + explain: true + tokenizer: PathHierarchy + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: PathHierarchy } + - match: { detail.tokenizer.tokens.0.token: a } + - match: { detail.tokenizer.tokens.1.token: a/b } + - match: { detail.tokenizer.tokens.2.token: a/b/c } + +--- +"pattern": + - do: + indices.analyze: + body: + text: "split by whitespace by default" + explain: true + tokenizer: + type: pattern + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { 
detail.tokenizer.tokens.0.token: split } + - match: { detail.tokenizer.tokens.1.token: by } + - match: { detail.tokenizer.tokens.2.token: whitespace } + - match: { detail.tokenizer.tokens.3.token: by } + - match: { detail.tokenizer.tokens.4.token: default } + + - do: + indices.analyze: + body: + text: "split by whitespace by default" + explain: true + tokenizer: pattern + - length: { detail.tokenizer.tokens: 5 } + - match: { detail.tokenizer.name: pattern } + - match: { detail.tokenizer.tokens.0.token: split } + - match: { detail.tokenizer.tokens.1.token: by } + - match: { detail.tokenizer.tokens.2.token: whitespace } + - match: { detail.tokenizer.tokens.3.token: by } + - match: { detail.tokenizer.tokens.4.token: default } + +--- +"uax_url_email": + - do: + indices.analyze: + body: + text: "Email me at john.smith@global-international.com" + explain: true + tokenizer: + type: uax_url_email + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: Email } + - match: { detail.tokenizer.tokens.1.token: me } + - match: { detail.tokenizer.tokens.2.token: at } + - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com } + + - do: + indices.analyze: + body: + text: "Email me at john.smith@global-international.com" + explain: true + tokenizer: uax_url_email + - length: { detail.tokenizer.tokens: 4 } + - match: { detail.tokenizer.name: uax_url_email } + - match: { detail.tokenizer.tokens.0.token: Email } + - match: { detail.tokenizer.tokens.1.token: me } + - match: { detail.tokenizer.tokens.2.token: at } + - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com } + +--- +"whitespace": + - do: + indices.analyze: + body: + text: "split by whitespace" + explain: true + tokenizer: + type: whitespace + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: _anonymous_tokenizer } + - match: { detail.tokenizer.tokens.0.token: split } + - match: { detail.tokenizer.tokens.1.token: by } + - match: { detail.tokenizer.tokens.2.token: whitespace } + + - do: + indices.analyze: + body: + text: "split by whitespace" + explain: true + tokenizer: whitespace + - length: { detail.tokenizer.tokens: 3 } + - match: { detail.tokenizer.name: whitespace } + - match: { detail.tokenizer.tokens.0.token: split } + - match: { detail.tokenizer.tokens.1.token: by } + - match: { detail.tokenizer.tokens.2.token: whitespace } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 611c6703ebc61..1737d743a6d1c 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -67,3 +67,33 @@ text: "foo" - length: { tokens: 1 } - match: { tokens.0.token: "\nfoo\n" } + +--- +"Synonym filter with tokenizer": + - do: + indices.create: + index: test_synonym + body: + settings: + index: + analysis: + tokenizer: + trigram: + type: nGram + min_gram: 3 + max_gram: 3 + filter: + synonym: + type: synonym + synonyms: ["kimchy => shay"] + + - do: + indices.analyze: + index: test_synonym + body: + tokenizer: trigram + filter: [synonym] + text: kimchy + - length: { tokens: 2 } + - match: { tokens.0.token: sha } + - match: { tokens.1.token: hay } diff --git 
a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml index eb8c9789a63ce..ec7b9493ac07e 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/search.query/20_ngram_search.yml @@ -39,3 +39,97 @@ text: query: foa - match: {hits.total: 1} + +--- +"testNGramCopyField": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + max_ngram_diff: 9 + analysis: + analyzer: + my_ngram_analyzer: + tokenizer: my_ngram_tokenizer + tokenizer: + my_ngram_tokenizer: + type: ngram + min_gram: 1 + max_gram: 10 + token_chars: [] + mappings: + doc: + properties: + origin: + type: text + copy_to: meta + meta: + type: text + analyzer: my_ngram_analyzer + + - do: + index: + index: test + type: doc + id: 1 + body: { "origin": "C.A1234.5678" } + refresh: true + + - do: + search: + body: + query: + match: + meta: + query: 1234 + - match: {hits.total: 1} + + - do: + search: + body: + query: + match: + meta: + query: 1234.56 + - match: {hits.total: 1} + + - do: + search: + body: + query: + match: + meta: + query: A1234 + - match: {hits.total: 1} + + - do: + search: + body: + query: + term: + meta: + value: a1234 + - match: {hits.total: 0} + + - do: + search: + body: + query: + match: + meta: + query: A1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} + + - do: + search: + body: + query: + match: + meta: + query: a1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java index 8e0828fcfcaea..22875139c9beb 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java @@ -57,7 +57,7 @@ public class RankEvalSpec implements Writeable, ToXContentObject { /** Default max number of requests. */ private static final int MAX_CONCURRENT_SEARCHES = 10; /** optional: Templates to base test requests on */ - private Map templates = new HashMap<>(); + private final Map templates = new HashMap<>(); public RankEvalSpec(List ratedRequests, EvaluationMetric metric, Collection templates) { this.metric = Objects.requireNonNull(metric, "Cannot evaluate ranking if no evaluation metric is provided."); @@ -68,8 +68,8 @@ public RankEvalSpec(List ratedRequests, EvaluationMetric metric, C this.ratedRequests = ratedRequests; if (templates == null || templates.isEmpty()) { for (RatedRequest request : ratedRequests) { - if (request.getTestRequest() == null) { - throw new IllegalStateException("Cannot evaluate ranking if neither template nor test request is " + if (request.getEvaluationRequest() == null) { + throw new IllegalStateException("Cannot evaluate ranking if neither template nor evaluation request is " + "provided.
Seen for request id: " + request.getId()); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 392ce5d0633a0..79dd693b3ac3c 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -75,9 +75,12 @@ public class RatedRequest implements Writeable, ToXContentObject { private final String id; private final List summaryFields; private final List ratedDocs; - // Search request to execute for this rated request. This can be null if template and corresponding parameters are supplied. + /** + * Search request to execute for this rated request. This can be null in + * case the query is supplied as a template with corresponding parameters + */ @Nullable - private SearchSourceBuilder testRequest; + private final SearchSourceBuilder evaluationRequest; /** * Map of parameters to use for filling a query template, can be used * instead of providing testRequest. */ @@ -86,27 +89,49 @@ public class RatedRequest implements Writeable, ToXContentObject { @Nullable private String templateId; - private RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest, + /** + * Create a rated request with template ids and parameters. + * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param params template parameters + * @param templateId a template id + */ + public RatedRequest(String id, List ratedDocs, Map params, + String templateId) { + this(id, ratedDocs, null, params, templateId); + } + + /** + * Create a rated request using a {@link SearchSourceBuilder} to define the + * evaluated query.
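+ * A usage sketch, for illustration only (the request id, ratings list and query shown here are hypothetical): {@code new RatedRequest("my_query", ratedDocs, new SearchSourceBuilder().query(new MatchAllQueryBuilder()))}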
+ * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param evaluatedQuery the query that is evaluated + */ + public RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery) { + this(id, ratedDocs, evaluatedQuery, new HashMap<>(), null); + } + + private RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery, Map params, String templateId) { - if (params != null && (params.size() > 0 && testRequest != null)) { + if (params != null && (params.size() > 0 && evaluatedQuery != null)) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if (templateId != null && testRequest != null) { + if (templateId != null && evaluatedQuery != null) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if ((params == null || params.size() < 1) && testRequest == null) { - throw new IllegalArgumentException( - "Need to set at least test request or test request template parameters."); + if ((params == null || params.size() < 1) && evaluatedQuery == null) { + throw new IllegalArgumentException("Need to set at least test request or test request template parameters."); } if ((params != null && params.size() > 0) && templateId == null) { - throw new IllegalArgumentException( - "If template parameters are supplied need to set id of template to apply " - + "them to too."); + throw new IllegalArgumentException("If template parameters are supplied need to set id of template to apply " + "them to too."); } + validateEvaluatedQuery(evaluatedQuery); + + // check that not two documents with same _index/id are specified Set docKeys = new HashSet<>(); for (RatedDocument doc : ratedDocs) { @@ -118,7 +143,7 @@ private RatedRequest(String id, List ratedDocs, SearchSourceBuild } this.id = id; - this.testRequest = testRequest; + this.evaluationRequest = evaluatedQuery; this.ratedDocs = new ArrayList<>(ratedDocs); if (params != null) { this.params = new HashMap<>(params); @@ -129,18 +154,30 @@ private RatedRequest(String id, List ratedDocs, SearchSourceBuild this.summaryFields = new ArrayList<>(); } - public RatedRequest(String id, List ratedDocs, Map params, - String templateId) { - this(id, ratedDocs, null, params, templateId); - } - - public RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest) { - this(id, ratedDocs, testRequest, new HashMap<>(), null); + static void validateEvaluatedQuery(SearchSourceBuilder evaluationRequest) { + // ensure that the evaluation request, if set, does not contain an aggregation, suggest or highlighting section + if (evaluationRequest != null) { + if (evaluationRequest.suggest() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a suggest section."); + } + if (evaluationRequest.aggregations() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain aggregations."); + } + if (evaluationRequest.highlighter() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a highlighter section."); + } + if (evaluationRequest.explain() != null && evaluationRequest.explain()) { + throw new
IllegalArgumentException("Query in rated requests should not use explain."); + } + if (evaluationRequest.profile()) { + throw new IllegalArgumentException("Query in rated requests should not use profile."); + } + } } - public RatedRequest(StreamInput in) throws IOException { + RatedRequest(StreamInput in) throws IOException { this.id = in.readString(); - testRequest = in.readOptionalWriteable(SearchSourceBuilder::new); + evaluationRequest = in.readOptionalWriteable(SearchSourceBuilder::new); int intentSize = in.readInt(); ratedDocs = new ArrayList<>(intentSize); @@ -159,7 +196,7 @@ public RatedRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeOptionalWriteable(testRequest); + out.writeOptionalWriteable(evaluationRequest); out.writeInt(ratedDocs.size()); for (RatedDocument ratedDoc : ratedDocs) { @@ -173,8 +210,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(this.templateId); } - public SearchSourceBuilder getTestRequest() { - return testRequest; + public SearchSourceBuilder getEvaluationRequest() { + return evaluationRequest; } /** return the user supplied request id */ @@ -240,8 +277,8 @@ public static RatedRequest fromXContent(XContentParser parser) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(ID_FIELD.getPreferredName(), this.id); - if (testRequest != null) { - builder.field(REQUEST_FIELD.getPreferredName(), this.testRequest); + if (evaluationRequest != null) { + builder.field(REQUEST_FIELD.getPreferredName(), this.evaluationRequest); } builder.startArray(RATINGS_FIELD.getPreferredName()); for (RatedDocument doc : this.ratedDocs) { @@ -285,7 +322,7 @@ public final boolean equals(Object obj) { RatedRequest other = (RatedRequest) obj; - return Objects.equals(id, other.id) && Objects.equals(testRequest, other.testRequest) + return Objects.equals(id, other.id) && Objects.equals(evaluationRequest, other.evaluationRequest) && Objects.equals(summaryFields, other.summaryFields) && Objects.equals(ratedDocs, other.ratedDocs) && Objects.equals(params, other.params) @@ -294,7 +331,7 @@ public final boolean equals(Object obj) { @Override public final int hashCode() { - return Objects.hash(id, testRequest, summaryFields, ratedDocs, params, + return Objects.hash(id, evaluationRequest, summaryFields, ratedDocs, params, templateId); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index 019ae274466ab..e0a0b3ea13378 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -52,6 +52,7 @@ import java.util.concurrent.ConcurrentHashMap; import static org.elasticsearch.common.xcontent.XContentHelper.createParser; +import static org.elasticsearch.index.rankeval.RatedRequest.validateEvaluatedQuery; /** * Instances of this class execute a collection of search intents (read: user @@ -99,15 +100,17 @@ protected void doExecute(RankEvalRequest request, ActionListener ratedRequestsInSearch = new ArrayList<>(); for (RatedRequest ratedRequest : ratedRequests) { - SearchSourceBuilder ratedSearchSource = ratedRequest.getTestRequest(); - if (ratedSearchSource == null) { + SearchSourceBuilder 
evaluationRequest = ratedRequest.getEvaluationRequest(); + if (evaluationRequest == null) { Map params = ratedRequest.getParams(); String templateId = ratedRequest.getTemplateId(); TemplateScript.Factory templateScript = scriptsWithoutParams.get(templateId); String resolvedRequest = templateScript.newInstance(params).execute(); try (XContentParser subParser = createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentType.JSON)) { - ratedSearchSource = SearchSourceBuilder.fromXContent(subParser, false); + evaluationRequest = SearchSourceBuilder.fromXContent(subParser, false); + // check for parts that should not be part of a ranking evaluation request + validateEvaluatedQuery(evaluationRequest); } catch (IOException e) { // if we fail parsing, put the exception into the errors map and continue errors.put(ratedRequest.getId(), e); @@ -116,17 +119,17 @@ LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentTyp } if (metric.forcedSearchSize().isPresent()) { - ratedSearchSource.size(metric.forcedSearchSize().get()); + evaluationRequest.size(metric.forcedSearchSize().get()); } ratedRequestsInSearch.add(ratedRequest); List summaryFields = ratedRequest.getSummaryFields(); if (summaryFields.isEmpty()) { - ratedSearchSource.fetchSource(false); + evaluationRequest.fetchSource(false); } else { - ratedSearchSource.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); + evaluationRequest.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); } - SearchRequest searchRequest = new SearchRequest(request.indices(), ratedSearchSource); + SearchRequest searchRequest = new SearchRequest(request.indices(), evaluationRequest); searchRequest.indicesOptions(request.indicesOptions()); msearchRequest.add(searchRequest); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 196b50b7f6163..084f29b8c9a87 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -33,7 +33,11 @@ import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestBuilders; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -165,7 +169,7 @@ public void testEqualsAndHash() throws IOException { private static RatedRequest mutateTestItem(RatedRequest original) { String id = original.getId(); - SearchSourceBuilder testRequest = original.getTestRequest(); + SearchSourceBuilder evaluationRequest = original.getEvaluationRequest(); List ratedDocs = original.getRatedDocs(); Map params = original.getParams(); List summaryFields = original.getSummaryFields(); @@ -177,11 +181,11 @@ private static RatedRequest mutateTestItem(RatedRequest original) { id = randomValueOtherThan(id, () -> randomAlphaOfLength(10)); break; case 1: - if (testRequest != null) { - int size = 
randomValueOtherThan(testRequest.size(), () -> randomInt(Integer.MAX_VALUE)); - testRequest = new SearchSourceBuilder(); - testRequest.size(size); - testRequest.query(new MatchAllQueryBuilder()); + if (evaluationRequest != null) { + int size = randomValueOtherThan(evaluationRequest.size(), () -> randomInt(Integer.MAX_VALUE)); + evaluationRequest = new SearchSourceBuilder(); + evaluationRequest.size(size); + evaluationRequest.query(new MatchAllQueryBuilder()); } else { if (randomBoolean()) { Map mutated = new HashMap<>(); @@ -204,10 +208,10 @@ private static RatedRequest mutateTestItem(RatedRequest original) { } RatedRequest ratedRequest; - if (testRequest == null) { + if (evaluationRequest == null) { ratedRequest = new RatedRequest(id, ratedDocs, params, templateId); } else { - ratedRequest = new RatedRequest(id, ratedDocs, testRequest); + ratedRequest = new RatedRequest(id, ratedDocs, evaluationRequest); } ratedRequest.addSummaryFields(summaryFields); @@ -258,6 +262,44 @@ public void testSettingTemplateIdNoParamsThrows() { expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, null, "templateId")); } + public void testAggsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.aggregation(AggregationBuilders.terms("fieldName")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain aggregations.", e.getMessage()); + } + + public void testSuggestionsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.suggest(new SuggestBuilder().addSuggestion("id", SuggestBuilders.completionSuggestion("fieldname"))); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a suggest section.", e.getMessage()); + } + + public void testHighlighterNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.highlighter(new HighlightBuilder().field("field")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a highlighter section.", e.getMessage()); + } + + public void testExplainNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().explain(true))); + assertEquals("Query in rated requests should not use explain.", e.getMessage()); + } + + public void testProfileNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().profile(true))); + assertEquals("Query in rated requests should not use profile.", e.getMessage()); + } + /** * test that modifying the order of index/docId to make sure it doesn't * matter for parsing xContent @@ -287,7 +329,7 @@ public void testParseFromXContent() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, 
querySpecString)) { RatedRequest specification = RatedRequest.fromXContent(parser); assertEquals("my_qa_query", specification.getId()); - assertNotNull(specification.getTestRequest()); + assertNotNull(specification.getEvaluationRequest()); List ratedDocs = specification.getRatedDocs(); assertEquals(3, ratedDocs.size()); for (int i = 0; i < 3; i++) { diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml index 605891d2b32d3..70e78f7e36b37 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -26,7 +33,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml index 8ace77eee59eb..17f422453ce18 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -22,7 +29,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 79fe5e7aaefa7..62aad486ad804 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -34,6 +34,7 @@ File repositoryDir = new File(project.buildDir, "shared-repository") /** A task to start the URLFixture which exposes the repositoryDir over HTTP **/ task urlFixture(type: AntFixture) { + dependsOn testClasses doFirst { repositoryDir.mkdirs() } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index ab0c271f3ae4f..c8c2c4829d2cf 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -122,8 +122,43 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem Netty4Utils.setup(); } + /* + * Size in bytes of an individual message received by io.netty.handler.codec.MessageAggregator which accumulates the content for an + * HTTP request. This number is used for estimating the maximum number of allowed buffers before the MessageAggregator's internal + * collection of buffers is resized. 
+ * + * By default we assume the Ethernet MTU (1500 bytes) but users can override it with a system property. + */ + private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("es.net.mtu", "1500"))); + + private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; + public static Setting SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = - Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope); + new Setting<>(SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, (s) -> { + ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s); + /* + * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of + * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate + * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded() + * for implementation details). We do not want to resize that buffer because this leads to additional garbage on the heap and also + * increases the application's native memory footprint (as direct byte buffers hold their contents off-heap). + * + * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see + * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for + * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The + * number of buffers that are needed depends on how often Netty reads network packets, which depends on the network type (MTU). + * We assume here that Elasticsearch receives HTTP requests via an Ethernet connection which has an MTU of 1500 bytes. + * + * Note that we are *not* pre-allocating any memory based on this setting but rather determine the CompositeByteBuf's capacity. + * The tradeoff is between fewer (but larger) buffers that are contained in the CompositeByteBuf and more (but smaller) buffers. + * With the default max content length of 100MB and an MTU of 1500 bytes we would allow 69905 entries. + */ + long maxBufferComponentsEstimate = Math.round((double) maxContentLength.getBytes() / MTU.getBytes()); + // clamp value to the allowed range; Netty's CompositeByteBuf implementation does not allow less than two components + long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE)); + return String.valueOf(maxBufferComponents);
+ }, s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), Property.NodeScope); public static final Setting SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count", (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), @@ -236,8 +271,9 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic this.maxContentLength = maxContentLength; logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + - "receive_predictor[{}], pipelining[{}], pipelining_max_events[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, pipelining, pipeliningMaxEvents); + "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents, + pipelining, pipeliningMaxEvents); } public Settings settings() { @@ -532,9 +568,7 @@ protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); ch.pipeline().addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.getBytes())); - if (transport.maxCompositeBufferComponents != -1) { - aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); - } + aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); ch.pipeline().addLast("aggregator", aggregator); if (transport.compression) { ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel)); diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 8231e15af200c..3c94f4ace7759 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -230,6 +230,11 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { fixtureSupported = true } +boolean legalPath = rootProject.rootDir.toString().contains(" ") == false +if (legalPath == false) { + fixtureSupported = false +} + // Always ignore HA integration tests in the normal integration test runner, they are included below as // part of their own HA-specific integration test tasks. integTestRunner.exclude('**/Ha*TestSuiteIT.class') @@ -248,7 +253,12 @@ if (fixtureSupported) { // Only include the HA integration tests for the HA test task integTestHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) } else { - logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") + if (legalPath) { + logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") + } else { + logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'") + } + // The normal integration test runner will just test that the plugin loads integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic' // HA fixture is unsupported. Don't run them. 
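As an aside on the http.netty.max_composite_buffer_components default introduced above, the following standalone sketch (not part of the change) reproduces the arithmetic of the setting's default function; the 100mb http.max_content_length default and the 1500 byte es.net.mtu default are assumed here.

public class CompositeBufferComponentsSketch {
    public static void main(String[] args) {
        final long mtuBytes = 1500L; // assumed default of the es.net.mtu system property
        final long maxContentLengthBytes = 100L * 1024L * 1024L; // assumed http.max_content_length default of 100mb
        // roughly one CompositeByteBuf component per network packet of an incoming request
        final long estimate = Math.round((double) maxContentLengthBytes / mtuBytes);
        // Netty's CompositeByteBuf does not allow fewer than two components, so clamp to [2, Integer.MAX_VALUE]
        final long maxBufferComponents = Math.max(2L, Math.min(estimate, Integer.MAX_VALUE));
        System.out.println(maxBufferComponents); // prints 69905, the figure cited in the change's comment
    }
}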
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index fa9cda06589c6..926cf0b2ad4af 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -100,14 +100,9 @@ public InputStream readBlob(String blobName) throws IOException { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite"); - } store.execute((Operation) fileContext -> { Path blob = new Path(path, blobName); // we pass CREATE, which means it fails if a blob already exists. - // NOTE: this behavior differs from FSBlobContainer, which passes TRUNCATE_EXISTING - // that should be fixed there, no need to bring truncation into this, give the user an error. EnumSet flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK); CreateOpts[] opts = {CreateOpts.bufferSize(bufferSize)}; try (FSDataOutputStream stream = fileContext.create(blob, flags, opts)) { @@ -121,6 +116,8 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t // if true synchronous behavior is required" stream.hsync(); } + } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) { + throw new FileAlreadyExistsException(blob.toString(), null, faee.getMessage()); } return null; }); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 7726a1df0b10d..8617ecc1fe28a 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -161,7 +161,7 @@ search: index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 - - match: { _shards.total: 8 } + - match: { _shards.total: 4 } - match: { hits.total: 2 } - match: { hits.hits.0._source.filter_field: 1 } - match: { hits.hits.0._index: "my_remote_cluster:test_index" } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index d37bb5a182586..c2840c1ce98e8 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -27,6 +27,8 @@ indices.create: index: field_caps_index_1 body: + settings: + index.number_of_shards: 1 mappings: t: properties: @@ -51,6 +53,8 @@ indices.create: index: field_caps_index_3 body: + settings: + index.number_of_shards: 1 mappings: t: properties: diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java index 50860ddd87b21..0ad78ad0c7a7e 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java +++ 
b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -106,6 +107,43 @@ public void testPrecisionAtRequest() throws IOException { assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE); } + public void testTemplateWithAggsFails() { + String template = "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain aggregations."); + } + + public void testTemplateWithSuggestFails() { + String template = "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a suggest section."); + } + + public void testTemplateWithHighlighterFails() { + String template = "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a highlighter section."); + } + + public void testTemplateWithProfileFails() { + String template = "{\"profile\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use profile."); + } + + public void testTemplateWithExplainFails() { + String template = "{\"explain\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use explain."); + } + + private static void assertTemplatedRequestFailures(String template, String expectedMessage) { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, Collections.singletonMap("param1", "value1"), "templateId"); + Collection templates = Collections.singletonList(new ScriptWithId("templateId", + new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, template, Collections.emptyMap()))); + RankEvalSpec rankEvalSpec = new RankEvalSpec(Collections.singletonList(ratedRequest), new PrecisionAtK(), templates); + RankEvalRequest rankEvalRequest = new RankEvalRequest(rankEvalSpec, new String[] { "test" }); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> client().execute(RankEvalAction.INSTANCE, rankEvalRequest).actionGet()); + assertEquals(expectedMessage, e.getMessage()); + } + private static List createRelevant(String... 
docs) { List relevant = new ArrayList<>(); for (String doc : docs) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml index 403b0b740c533..78b7a4277570a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -14,6 +14,8 @@ --- "No templates": + - skip: + features: default_shards - do: cat.templates: {} @@ -174,6 +176,8 @@ --- "Sort templates": + - skip: + features: default_shards - do: indices.put_template: name: test @@ -222,6 +226,8 @@ --- "Multiple template": + - skip: + features: default_shards - do: indices.put_template: name: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml index 824c48c8d99d7..d95417c16ca5d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.analyze/10_analyze.yml @@ -76,36 +76,6 @@ - match: { detail.tokenfilters.0.name: "_anonymous_tokenfilter" } - match: { detail.tokenfilters.0.tokens.0.token: bar } ---- -"Synonym filter with tokenizer": - - do: - indices.create: - index: test_synonym - body: - settings: - index: - analysis: - tokenizer: - trigram: - type: nGram - min_gram: 3 - max_gram: 3 - filter: - synonym: - type: synonym - synonyms: ["kimchy => shay"] - - - do: - indices.analyze: - index: test_synonym - body: - tokenizer: trigram - filter: [synonym] - text: kimchy - - length: { tokens: 2 } - - match: { tokens.0.token: sha } - - match: { tokens.1.token: hay } - --- "Custom normalizer in request": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index f53c88bcfca2e..a88b37ead3154 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,5 +1,9 @@ --- "Shrink index via API": + - skip: + version: " - 6.99.99" + reason: expects warnings that pre-7.0.0 will not send + features: "warnings" # creates an index with one document solely allocated on the master node # and shrinks it into a new index with a single shard # we don't do the relocation to a single node after the index is created @@ -20,7 +24,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 - do: index: index: source @@ -62,6 +67,8 @@ body: settings: index.number_of_replicas: 0 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index d96e1dbdcb99b..ee7b2215d2187 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,5 +1,10 @@ --- "Shrink index ignores target template mapping": + 
- skip: + version: " - 6.99.99" + reason: expects warnings that pre-7.0.0 will not send + features: "warnings" + - do: cluster.state: {} # Get master node id @@ -15,7 +20,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 mappings: test: properties: @@ -65,6 +71,8 @@ body: settings: index.number_of_replicas: 0 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 34757427e6983..50438384b3ab0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.3.99" - reason: copy_settings did not exist prior to 6.4.0 + version: " - 6.99.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" - do: @@ -19,6 +19,7 @@ settings: # ensure everything is allocated on the master node index.routing.allocation.include._id: $master + index.number_of_shards: 2 index.number_of_replicas: 0 index.merge.scheduler.max_merge_count: 4 @@ -47,8 +48,6 @@ settings: index.number_of_replicas: 0 index.merge.scheduler.max_thread_count: 2 - warnings: - - "parameter [copy_settings] is deprecated but was [true]" - do: cluster.health: @@ -64,20 +63,19 @@ - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - # now we do a actual shrink and do not copy settings + # now we do an actual shrink and do not copy settings (by default) - do: indices.shrink: index: "source" target: "no-copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s - copy_settings: false body: settings: index.number_of_replicas: 0 index.merge.scheduler.max_thread_count: 2 warnings: - - "parameter [copy_settings] is deprecated but was [false]" + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -92,3 +90,16 @@ - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - is_false: no-copy-settings-target.settings.index.blocks.write - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id + + # now we do an actual shrink and try to set no copy settings + - do: + catch: /illegal_argument_exception/ + indices.shrink: + index: "source" + target: "explicit-no-copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: false + body: + settings: + index.number_of_replicas: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 9e32f98831dde..635673c182f2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -33,8 +33,9 @@ setup: --- "Split index via API": - skip: - version: " - 6.0.99" - reason: Added in 6.1.0 + version: " - 6.99.99" +
reason: expects warnings that pre-7.0.0 will not send + features: "warnings" # make it read-only - do: @@ -60,6 +61,8 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 4 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -103,13 +106,13 @@ setup: --- "Split from 1 to N": -# - skip: -# version: " - 6.99.99" -# reason: Added in 7.0.0 -# uncomment once AwaitsFix is resolved - skip: + # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" + # version: " - 6.99.99" + # reason: expects warnings that pre-7.0.0 will not send + features: "warnings" - do: indices.create: index: source_one_shard @@ -163,6 +166,8 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 5 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -208,8 +213,9 @@ setup: --- "Create illegal split indices": - skip: - version: " - 6.0.99" - reason: Added in 6.1.0 + version: " - 6.99.99" + reason: expects warnings that pre-7.0.0 will not send + features: "warnings" # try to do an illegal split with number_of_routing_shards set - do: @@ -224,6 +230,8 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 4 index.number_of_routing_shards: 8 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" # try to do an illegal split with illegal number_of_shards - do: @@ -237,3 +245,5 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 6 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 69b505097f2ec..433ac040dd1e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -1,12 +1,12 @@ --- "Split index ignores target template mapping": -# - skip: -# version: " - 6.0.99" -# reason: Added in 6.1.0 -# uncomment once AwaitsFix is resolved - skip: + # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" + # version: " - 6.99.99" + # reason: expects warnings that pre-7.0.0 will not send + features: "warnings" # create index - do: @@ -68,6 +68,8 @@ settings: index.number_of_shards: 2 index.number_of_replicas: 0 + warnings: + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 1d3e37aa7b05d..e0ace991f4f0d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.3.99" - 
reason: copy_settings did not exist prior to 6.4.0 + version: " - 6.99.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" - do: @@ -50,8 +50,6 @@ index.number_of_replicas: 0 index.number_of_shards: 2 index.merge.scheduler.max_thread_count: 2 - warnings: - - "parameter [copy_settings] is deprecated but was [true]" - do: cluster.health: @@ -67,21 +65,20 @@ - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - # now we do a actual shrink and do not copy settings + # now we do an actual split and do not copy settings (by default) - do: indices.split: index: "source" target: "no-copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s - copy_settings: false body: settings: index.number_of_replicas: 0 index.number_of_shards: 2 index.merge.scheduler.max_thread_count: 2 warnings: - - "parameter [copy_settings] is deprecated but was [false]" + - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -96,3 +93,15 @@ - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - is_false: no-copy-settings-target.settings.index.blocks.write - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id + + - do: + catch: /illegal_argument_exception/ + indices.split: + index: "source" + target: "explicit-no-copy-settings-target" + wait_for_active_shards: 1 + master_timeout: 10s + copy_settings: false + body: + settings: + index.number_of_replicas: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 86c6c632d5e94..f8d960e0c2597 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -17,14 +17,14 @@ setup: index: test type: doc id: 1 - body: { "date": "2014-03-03T00:00:00", "keyword": "foo" } + body: { "date": "2014-03-03T00:00:00", "keyword": "dgx" } - do: index: index: test type: doc id: 2 - body: { "date": "2015-03-03T00:00:00", "keyword": "bar" } + body: { "date": "2015-03-03T00:00:00", "keyword": "dfs" } - do: index: @@ -38,7 +38,36 @@ setup: index: test type: doc id: 4 - body: { "date": "2017-03-03T00:00:00" } + body: { "date": "2017-03-03T00:00:00", "keyword": "foo" } + + - do: + index: + index: test + type: doc + id: 5 + body: { "date": "2018-03-03T00:00:00", "keyword": "bar" } + + - do: + index: + index: test + type: doc + id: 6 + body: { "date": "2019-03-03T00:00:00", "keyword": "baz" } + + - do: + index: + index: test + type: doc + id: 7 + body: { "date": "2020-03-03T00:00:00", "keyword": "qux" } + + - do: + index: + index: test + type: doc + id: 8 + body: { "date": "2021-03-03T00:00:00", "keyword": "quux" } + - do: indices.refresh: diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index f53b5437f03c2..e510c0719df2d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -56,7 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest
implements private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; - private boolean copySettings = false; + private Boolean copySettings; ResizeRequest() {} @@ -80,6 +80,7 @@ public ActionRequestValidationException validate() { if (type == ResizeType.SPLIT && IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) { validationException = addValidationError("index.number_of_shards is required for split operations", validationException); } + assert copySettings == null || copySettings; return validationException; } @@ -98,10 +99,12 @@ public void readFrom(StreamInput in) throws IOException { } else { type = ResizeType.SHRINK; // BWC this used to be shrink only } - if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + if (in.getVersion().before(Version.V_6_4_0)) { + copySettings = null; + } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)) { + copySettings = in.readBoolean(); } else { - copySettings = false; + copySettings = in.readOptionalBoolean(); } } @@ -113,8 +116,12 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } - if (out.getVersion().onOrAfter(Version.V_6_4_0)) { - out.writeBoolean(copySettings); + if (out.getVersion().before(Version.V_6_4_0)) { + // no-op: nodes before 6.4.0 are not aware of copy_settings, so nothing is written + } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(copySettings == null ? false : copySettings); + } else { + out.writeOptionalBoolean(copySettings); } } @@ -187,11 +194,14 @@ public ResizeType getResizeType() { return type; } - public void setCopySettings(final boolean copySettings) { + public void setCopySettings(final Boolean copySettings) { + if (copySettings != null && copySettings == false) { + throw new IllegalArgumentException("[copySettings] can not be explicitly set to [false]"); + } this.copySettings = copySettings; } - public boolean getCopySettings() { + public Boolean getCopySettings() { return copySettings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 834ef15ce264d..040504ea97460 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -190,7 +190,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final Resi .waitForActiveShards(targetIndex.waitForActiveShards()) .recoverFrom(metaData.getIndex()) .resizeType(resizeRequest.getResizeType()) - .copySettings(resizeRequest.getCopySettings()); + .copySettings(resizeRequest.getCopySettings() == null ?
false : resizeRequest.getCopySettings()); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 166aad3ecaa12..0d8a374e66d42 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -366,8 +366,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { } // now, put the request settings, so they override templates indexSettingsBuilder.put(request.settings()); + if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { + DiscoveryNodes nodes = currentState.nodes(); + final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); + indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); + } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5)); + final int numberOfShards = getNumberOfShards(indexSettingsBuilder); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -376,12 +382,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS)); } - if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { - DiscoveryNodes nodes = currentState.nodes(); - final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); - indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); - } - if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) { indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); } @@ -573,6 +573,18 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } + static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { + // TODO: this logic can be removed when the current major version is 8 + assert Version.CURRENT.major == 7; + final int numberOfShards; + if (Version.fromId(Integer.parseInt(indexSettingsBuilder.get(SETTING_VERSION_CREATED))).before(Version.V_7_0_0_alpha1)) { + numberOfShards = 5; + } else { + numberOfShards = 1; + } + return numberOfShards; + } + @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index f960664306f08..a9600681d1605 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -123,9 +123,6 @@ public InputStream readBlob(String name) throws IOException { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite"); - } final Path file = path.resolve(blobName); try 
diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java
index f960664306f08..a9600681d1605 100644
--- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java
+++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java
@@ -123,9 +123,6 @@ public InputStream readBlob(String name) throws IOException {

     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-        if (blobExists(blobName)) {
-            throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite");
-        }
         final Path file = path.resolve(blobName);
         try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) {
             Streams.copy(inputStream, outputStream);
diff --git a/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java
index a3707d9e44a0d..1d94cad150785 100644
--- a/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java
+++ b/server/src/main/java/org/elasticsearch/index/analysis/KeywordTokenizerFactory.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;

 public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
index 2d9e8e78b7768..1054744422638 100644
--- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
+++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java
@@ -39,11 +39,9 @@
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.ChineseAnalyzerProvider;
 import org.elasticsearch.index.analysis.CjkAnalyzerProvider;
-import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
 import org.elasticsearch.index.analysis.CzechAnalyzerProvider;
 import org.elasticsearch.index.analysis.DanishAnalyzerProvider;
 import org.elasticsearch.index.analysis.DutchAnalyzerProvider;
-import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
 import org.elasticsearch.index.analysis.EnglishAnalyzerProvider;
 import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider;
 import org.elasticsearch.index.analysis.FinnishAnalyzerProvider;
@@ -60,14 +58,9 @@
 import org.elasticsearch.index.analysis.KeywordAnalyzerProvider;
 import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
 import org.elasticsearch.index.analysis.LatvianAnalyzerProvider;
-import org.elasticsearch.index.analysis.LetterTokenizerFactory;
 import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
-import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
-import org.elasticsearch.index.analysis.NGramTokenizerFactory;
 import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
-import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
 import org.elasticsearch.index.analysis.PatternAnalyzerProvider;
-import org.elasticsearch.index.analysis.PatternTokenizerFactory;
 import org.elasticsearch.index.analysis.PersianAnalyzerProvider;
 import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider;
 import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
@@ -88,13 +81,10 @@
 import org.elasticsearch.index.analysis.StopTokenFilterFactory;
 import org.elasticsearch.index.analysis.SwedishAnalyzerProvider;
 import org.elasticsearch.index.analysis.ThaiAnalyzerProvider;
-import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
 import org.elasticsearch.index.analysis.TurkishAnalyzerProvider;
-import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
 import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
-import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
 import org.elasticsearch.plugins.AnalysisPlugin;

 import java.io.IOException;
@@ -223,36 +213,19 @@ static Map<String, PreConfiguredTokenizer> setupPreConfiguredTokenizers(List<An
     private NamedRegistry<AnalysisProvider<TokenizerFactory>> setupTokenizers(List<AnalysisPlugin> plugins) {
         NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = new NamedRegistry<>("tokenizer");
         tokenizers.register("standard", StandardTokenizerFactory::new);
-        tokenizers.register("uax_url_email", UAX29URLEmailTokenizerFactory::new);
-        tokenizers.register("path_hierarchy", PathHierarchyTokenizerFactory::new);
-        tokenizers.register("PathHierarchy", PathHierarchyTokenizerFactory::new);
         tokenizers.register("keyword", KeywordTokenizerFactory::new);
-        tokenizers.register("letter", LetterTokenizerFactory::new);
-        tokenizers.register("lowercase", LowerCaseTokenizerFactory::new);
-        tokenizers.register("whitespace", WhitespaceTokenizerFactory::new);
-        tokenizers.register("nGram", NGramTokenizerFactory::new);
-        tokenizers.register("ngram", NGramTokenizerFactory::new);
-        tokenizers.register("edgeNGram", EdgeNGramTokenizerFactory::new);
-        tokenizers.register("edge_ngram", EdgeNGramTokenizerFactory::new);
-        tokenizers.register("pattern", PatternTokenizerFactory::new);
-        tokenizers.register("classic", ClassicTokenizerFactory::new);
-        tokenizers.register("thai", ThaiTokenizerFactory::new);
         tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers);
         return tokenizers;
     }
diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java
index 23e5e6795117a..6ccffd3a22fe2 100644
--- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java
+++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java
@@ -19,18 +19,8 @@
 package org.elasticsearch.indices.analysis;

 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LetterTokenizer;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.NGramTokenizer;
-import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
-import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.lucene.analysis.standard.ClassicTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
-import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.Version;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;

@@ -41,69 +31,6 @@ public enum PreBuiltTokenizers {
         protected Tokenizer create(Version version) {
             return new StandardTokenizer();
         }
-    },
-
-    CLASSIC(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new ClassicTokenizer();
-        }
-    },
-
-    UAX_URL_EMAIL(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new UAX29URLEmailTokenizer();
-        }
-    },
-
-    PATH_HIERARCHY(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new PathHierarchyTokenizer();
-        }
-    },
-
-    LETTER(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new LetterTokenizer();
-        }
-    },
-
-    WHITESPACE(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new WhitespaceTokenizer();
-        }
-    },
-
-    NGRAM(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new NGramTokenizer();
-        }
-    },
-
-    EDGE_NGRAM(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
-        }
-    },
-
-    PATTERN(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new PatternTokenizer(Regex.compile("\\W+", null), -1);
-        }
-    },
-
-    THAI(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new ThaiTokenizer();
-        }
     }

     ;
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index 553744e66ef04..52e0ac8ab860f 100644
--- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.indices.flush;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -501,8 +502,18 @@ private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
             if (indexShard.routingEntry().primary() == false) {
                 throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
             }
+            if (Assertions.ENABLED) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
+                }
+            }
             int opCount = indexShard.getActiveOperationsCount();
-            logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
+            // Need to snapshot the debug info twice as it's updated concurrently with the permit count.
+            if (Assertions.ENABLED) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
+                }
+            }
             return new InFlightOpsResponse(opCount);
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index ae3a6aada0a7f..fcbc54efbf780 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -950,6 +950,20 @@ protected void finalize(final List<SnapshotFiles> snapshots,
             final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
             try {
+                // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
+                // attempt to write an index file with this generation failed mid-way after creating the temporary file.
+                for (final String blobName : blobs.keySet()) {
+                    if (indexShardSnapshotsFormat.isTempBlobName(blobName)) {
+                        try {
+                            blobContainer.deleteBlobIgnoringIfNotExists(blobName);
+                        } catch (IOException e) {
+                            logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blob [{}] during finalization",
+                                snapshotId, shardId, blobName), e);
+                            throw e;
+                        }
+                    }
+                }
+
                 // If we deleted all snapshots, we don't need to create a new index file
                 if (snapshots.size() > 0) {
                     indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, blobContainer, indexGeneration);
@@ -957,7 +971,7 @@ protected void finalize(final List<SnapshotFiles> snapshots,

                 // Delete old index files
                 for (final String blobName : blobs.keySet()) {
-                    if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
+                    if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
                        try {
                            blobContainer.deleteBlobIgnoringIfNotExists(blobName);
                        } catch (IOException e) {
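The two blob-store changes work together: FsBlobContainer#writeBlob now lets the filesystem enforce "no overwrite" atomically via CREATE_NEW rather than a racy exists-then-create check, and shard finalization clears stale temporary blobs beforehand so a retried generation cannot trip over leftovers from a failed earlier attempt. A minimal, self-contained sketch of the create-new semantics being relied on (illustrative only, not the repository code):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    final class CreateNewSketch {
        // Fails atomically with java.nio.file.FileAlreadyExistsException if the
        // target exists: there is no window between checking and creating.
        static void writeBlob(Path file, InputStream in) throws IOException {
            try (OutputStream out = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) {
                final byte[] buffer = new byte[8192];
                int read;
                while ((read = in.read(buffer)) != -1) {
                    out.write(buffer, 0, read);
                }
            }
        }
    }

This is also why ESBlobStoreContainerTestCase further down can tighten its expectation from a generic IOException to FileAlreadyExistsException.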
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
index e6c994a85c35d..bc5db552b9dd2 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java
@@ -48,17 +48,22 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final
         final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
         resizeRequest.setResizeType(getResizeType());
         final String rawCopySettings = request.param("copy_settings");
-        final boolean copySettings;
+        final Boolean copySettings;
         if (rawCopySettings == null) {
             copySettings = resizeRequest.getCopySettings();
+        } else if (rawCopySettings.isEmpty()) {
+            copySettings = true;
         } else {
-            deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]");
-            if (rawCopySettings.length() == 0) {
-                copySettings = true;
-            } else {
-                copySettings = Booleans.parseBoolean(rawCopySettings);
+            copySettings = Booleans.parseBoolean(rawCopySettings);
+            if (copySettings == false) {
+                throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]");
             }
         }
+        if (copySettings == null) {
+            deprecationLogger.deprecated(
+                    "resize operations without copying settings is deprecated; "
+                            + "set parameter [copy_settings] to [true] for future default behavior");
+        }
         resizeRequest.setCopySettings(copySettings);
         request.applyContentParser(resizeRequest::fromXContent);
         resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
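Taken together with the request-level validation in ResizeRequest above, the REST parameter now behaves as follows (a summary sketch, not code from the change):

    // copy_settings parameter handling after this change:
    //   parameter absent     -> copySettings == null; deprecation warning about
    //                           the legacy "do not copy settings" behavior
    //   copy_settings        -> flag form, treated as true
    //   copy_settings=true   -> true
    //   copy_settings=false  -> rejected with IllegalArgumentException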
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
index 515606dd10bfa..de11902d9141d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
@@ -287,7 +287,7 @@ public void testUnknown() throws IOException {
         e = expectThrows(IllegalArgumentException.class,
             () -> TransportAnalyzeAction.analyze(
                 new AnalyzeRequest()
-                    .tokenizer("whitespace")
+                    .tokenizer("standard")
                     .addTokenFilter("foobar")
                     .text("the qu1ck brown fox"),
                 "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount));
@@ -300,7 +300,7 @@ public void testUnknown() throws IOException {
         e = expectThrows(IllegalArgumentException.class,
             () -> TransportAnalyzeAction.analyze(
                 new AnalyzeRequest()
-                    .tokenizer("whitespace")
+                    .tokenizer("standard")
                     .addTokenFilter("lowercase")
                     .addCharFilter("foobar")
                     .text("the qu1ck brown fox"),
@@ -322,7 +322,7 @@ public void testUnknown() throws IOException {

     public void testNonPreBuildTokenFilter() throws IOException {
         AnalyzeRequest request = new AnalyzeRequest();
-        request.tokenizer("whitespace");
+        request.tokenizer("standard");
         request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters()
         request.text("the quick brown fox");
         AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index e4bb197f80ace..8443ac2bf2e3d 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
@@ -76,6 +77,7 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;

+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
 public class ShrinkIndexIT extends ESIntegTestCase {

     @Override
@@ -83,7 +85,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(InternalSettingsPlugin.class);
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
     public void testCreateShrinkIndexToN() {
         int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
         int[] shardSplits = randomFrom(possibleShardSplits);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
index 77ead591a01f2..ba595de5215a3 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestTests.java
@@ -31,12 +31,34 @@
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

 import java.io.IOException;
+import java.util.function.Consumer;
+import java.util.function.Supplier;

 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;

 public class ResizeRequestTests extends ESTestCase {

+    public void testCopySettingsValidation() {
+        runTestCopySettingsValidation(false, r -> {
+            final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, r::get);
+            assertThat(e, hasToString(containsString("[copySettings] can not be explicitly set to [false]")));
+        });
+
+        runTestCopySettingsValidation(null, r -> assertNull(r.get().getCopySettings()));
+        runTestCopySettingsValidation(true, r -> assertTrue(r.get().getCopySettings()));
+    }
+
+    private void runTestCopySettingsValidation(final Boolean copySettings, final Consumer<Supplier<ResizeRequest>> consumer) {
+        consumer.accept(() -> {
+            final ResizeRequest request = new ResizeRequest();
+            request.setCopySettings(copySettings);
+            return request;
+        });
+    }
+
     public void testToXContent() throws IOException {
         {
             ResizeRequest request = new ResizeRequest("target", "source");
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
index 79cc13594e98a..c55e4851edbc2 100644
--- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
+++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java
@@ -188,7 +188,7 @@ public void testSimpleTermVectors() throws IOException {
                 .addAlias(new Alias("alias"))
                 .setSettings(Settings.builder()
                         .put(indexSettings())
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
         for (int i = 0; i < 10; i++) {
             client().prepareIndex("test", "type1", Integer.toString(i))
@@ -260,7 +260,7 @@ public void testRandomSingleTermVectors() throws IOException {
                 .endObject().endObject();
         assertAcked(prepareCreate("test").addMapping("type1", mapping)
                 .setSettings(Settings.builder()
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
         for (int i = 0; i < 10; i++) {
             client().prepareIndex("test", "type1", Integer.toString(i))
@@ -394,7 +394,7 @@ public void testSimpleTermVectorsWithGenerate() throws IOException {
                 .addMapping("type1", mapping)
                 .setSettings(Settings.builder()
                         .put(indexSettings())
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
         ensureGreen();
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java
index 3ce7dc3cd2a46..0e8877701e4b9 100644
--- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.action.termvectors;

+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.payloads.FloatEncoder;
@@ -35,6 +36,7 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.plugins.AnalysisPlugin;
@@ -93,6 +95,12 @@ public TokenStream create(TokenStream tokenStream) {
             });
         }

+        @Override
+        public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
+            return Collections.singletonList(PreConfiguredTokenizer.singleton("mock-whitespace",
+                    () -> new MockTokenizer(MockTokenizer.WHITESPACE, false), null));
+        }
+
         // Based on DelimitedPayloadTokenFilter:
         final class MockPayloadTokenFilter extends TokenFilter {
             private final char delimiter;
@@ -151,7 +159,7 @@ public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOExceptio
                 .startObject("field").field("type", "text").field("term_vector", "with_positions_offsets_payloads")
                 .field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
         Settings setting = Settings.builder()
-                .put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.payload_test.tokenizer", "mock-whitespace")
                 .putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
                 .put("index.analysis.filter.my_delimited_payload.delimiter", delimiter)
                 .put("index.analysis.filter.my_delimited_payload.encoding", encodingString)
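These term-vector test edits illustrate the two migration strategies used throughout this change: switch to "standard" when the exact tokenization is irrelevant, or register a private mock when whitespace semantics actually matter. The latter pattern as a standalone sketch (a hypothetical plugin class; the API calls mirror the hunk above, and MockTokenizer is Lucene's test tokenizer):

    import java.util.Collections;
    import java.util.List;

    import org.apache.lucene.analysis.MockTokenizer;
    import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
    import org.elasticsearch.plugins.AnalysisPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class MockWhitespacePlugin extends Plugin implements AnalysisPlugin {
        @Override
        public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
            return Collections.singletonList(PreConfiguredTokenizer.singleton(
                    "mock-whitespace",
                    () -> new MockTokenizer(MockTokenizer.WHITESPACE, false),
                    null)); // null: no multi-term-aware variant
        }
    }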
diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
index d887387d43fe9..ccdc1d6ab3323 100644
--- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java
@@ -96,7 +96,7 @@ public void testDisablingAllocationFiltering() throws Exception {

         logger.info("--> creating an index with no replicas");
         client().admin().indices().prepareCreate("test")
-            .setSettings(Settings.builder().put("index.number_of_replicas", 0))
+            .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
             .execute().actionGet();
         ensureGreen();
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
index ad36457bde505..de8251ece255f 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java
@@ -185,7 +185,7 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception {
     public void testDefaultSettings() throws Exception {
         final ClusterState result = executeTask();

-        assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("5"));
+        assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("1"));
     }

     public void testSettingsFromClusterState() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
index d5f3d71d7ee26..24f5a69656114 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java
@@ -56,6 +56,7 @@
 import java.util.stream.Stream;

 import static java.util.Collections.emptyMap;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
 import static org.hamcrest.Matchers.endsWith;
 import static org.hamcrest.Matchers.equalTo;
@@ -92,6 +93,21 @@ public static boolean isSplitable(int source, int target) {
         return source * x == target;
     }

+    public void testNumberOfShards() {
+        {
+            final Version versionCreated = VersionUtils.randomVersionBetween(
+                    random(),
+                    Version.V_6_0_0_alpha1, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1));
+            final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated);
+            assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(5));
+        }
+        {
+            final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0_alpha1, Version.CURRENT);
+            final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated);
+            assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1));
+        }
+    }
+
     public void testValidateShrinkIndex() {
         int numShards = randomIntBetween(2, 42);
         ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10),
diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
index 9698ab18c198b..f209f771ab089 100644
--- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java
@@ -137,6 +137,7 @@ public void tearDown() throws Exception {

     private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28685")
     public void testSimplePings() throws IOException, InterruptedException, ExecutionException {
         // use ephemeral ports
         final Settings settings = Settings.builder().put("cluster.name", "test").put(TcpTransport.PORT.getKey(), 0).build();
diff --git a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
index 9f214082d4b22..802761780a713 100644
--- a/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java
@@ -35,10 +35,8 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.startsWith;

-
 public class AnalyzeActionIT extends ESIntegTestCase {
     public void testSimpleAnalyzerTests() throws Exception {
         assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
@@ -333,14 +331,14 @@ public void testCustomTokenFilterInRequest() throws Exception {
         AnalyzeResponse analyzeResponse = client().admin().indices()
             .prepareAnalyze()
             .setText("Foo buzz test")
-            .setTokenizer("whitespace")
+            .setTokenizer("standard")
             .addTokenFilter("lowercase")
             .addTokenFilter(stopFilterSettings)
             .setExplain(true)
             .get();

         //tokenizer
-        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("whitespace"));
+        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("standard"));
         assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
         assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("Foo"));
         assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
@@ -393,41 +391,6 @@ public void testCustomTokenFilterInRequest() throws Exception {
         assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens()[0].getPositionLength(), equalTo(1));
     }

-    public void testCustomTokenizerInRequest() throws Exception {
-        Map<String, Object> tokenizerSettings = new HashMap<>();
-        tokenizerSettings.put("type", "nGram");
-        tokenizerSettings.put("min_gram", 2);
-        tokenizerSettings.put("max_gram", 2);
-
-        AnalyzeResponse analyzeResponse = client().admin().indices()
-            .prepareAnalyze()
-            .setText("good")
-            .setTokenizer(tokenizerSettings)
-            .setExplain(true)
-            .get();
-
-        //tokenizer
-        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("_anonymous_tokenizer"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("go"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getEndOffset(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPosition(), equalTo(0));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPositionLength(), equalTo(1));
-
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getTerm(), equalTo("oo"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getStartOffset(), equalTo(1));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getEndOffset(), equalTo(3));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPosition(), equalTo(1));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPositionLength(), equalTo(1));
-
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getTerm(), equalTo("od"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getStartOffset(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getEndOffset(), equalTo(4));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPosition(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPositionLength(), equalTo(1));
-    }
-
     public void testAnalyzeKeywordField() throws IOException {
         assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "keyword", "type=keyword"));
         ensureGreen("test");
diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
index 27e1c1af2bb83..e31e605b6b10f 100644
--- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
@@ -254,7 +254,7 @@ private String syncedFlushDescription(ShardsSyncedFlushResult result) {
             result.totalShards(), result.failed(), result.failureReason(), detail);
     }

-    @TestLogging("_root:DEBUG")
+    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@@ -296,7 +296,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
         assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
     }

-    @TestLogging("_root:DEBUG")
+    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
diff --git a/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
index 2c6f7675673b4..be4c09d96da86 100644
--- a/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
@@ -677,7 +677,7 @@ public void testCombineTemplates() throws Exception{
                     "    \"analysis\" : {\n" +
                     "        \"analyzer\" : {\n" +
                     "            \"custom_1\" : {\n" +
-                    "                \"tokenizer\" : \"whitespace\"\n" +
+                    "                \"tokenizer\" : \"standard\"\n" +
                     "            }\n" +
                     "        }\n" +
                     "    }\n" +
diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java
index 75071309458cc..2c30184ee4e35 100644
--- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java
+++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java
@@ -20,15 +20,20 @@
 package org.elasticsearch.rest.action.admin.indices;

 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.rest.FakeRestRequest;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.Locale;

+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;
 import static org.mockito.Mockito.mock;

 public class RestResizeHandlerTests extends ESTestCase {
@@ -36,27 +41,41 @@ public class RestResizeHandlerTests extends ESTestCase {
     public void testShrinkCopySettingsDeprecated() throws IOException {
         final RestResizeHandler.RestShrinkIndexAction handler =
                 new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class));
-        final String copySettings = randomFrom("true", "false");
-        final FakeRestRequest request =
-                new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
-                        .withParams(Collections.singletonMap("copy_settings", copySettings))
-                        .withPath("source/_shrink/target")
-                        .build();
-        handler.prepareRequest(request, mock(NodeClient.class));
-        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+        for (final String copySettings : new String[]{null, "", "true", "false"}) {
+            runTestResizeCopySettingsDeprecated(handler, "shrink", copySettings);
+        }
     }

     public void testSplitCopySettingsDeprecated() throws IOException {
         final RestResizeHandler.RestSplitIndexAction handler =
                 new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class));
-        final String copySettings = randomFrom("true", "false");
-        final FakeRestRequest request =
+        for (final String copySettings : new String[]{null, "", "true", "false"}) {
+            runTestResizeCopySettingsDeprecated(handler, "split", copySettings);
+        }
+    }
+
+    private void runTestResizeCopySettingsDeprecated(
+            final RestResizeHandler handler, final String resizeOperation, final String copySettings) throws IOException {
+        final FakeRestRequest.Builder builder =
                 new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
                         .withParams(Collections.singletonMap("copy_settings", copySettings))
-                        .withPath("source/_split/target")
-                        .build();
-        handler.prepareRequest(request, mock(NodeClient.class));
-        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+                        .withPath(String.format(Locale.ROOT, "source/_%s/target", resizeOperation));
+        if (copySettings != null) {
+            builder.withParams(Collections.singletonMap("copy_settings", copySettings));
+        }
+        final FakeRestRequest request = builder.build();
+        if ("false".equals(copySettings)) {
+            final IllegalArgumentException e =
+                    expectThrows(IllegalArgumentException.class, () -> handler.prepareRequest(request, mock(NodeClient.class)));
+            assertThat(e, hasToString(containsString("parameter [copy_settings] can not be explicitly set to [false]")));
+        } else {
+            handler.prepareRequest(request, mock(NodeClient.class));
+            if (copySettings == null) {
+                assertWarnings(
+                        "resize operations without copying settings is deprecated; "
+                                + "set parameter [copy_settings] to [true] for future default behavior");
+            }
+        }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
index 43c7010d4b023..73a3c553b4d1a 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
@@ -19,9 +19,7 @@
 package org.elasticsearch.search.aggregations.pipeline.moving.avg;

-import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
-import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
@@ -45,7 +43,6 @@
 import org.elasticsearch.test.ESIntegTestCase;
 import org.hamcrest.Matchers;

-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -69,7 +66,6 @@
 import static org.hamcrest.core.IsNull.nullValue;

 @ESIntegTestCase.SuiteScopeTestCase
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
 public class MovAvgIT extends ESIntegTestCase {
     private static final String INTERVAL_FIELD = "l_value";
     private static final String VALUE_FIELD = "v_value";
@@ -1308,7 +1304,7 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount,
             } else {
                 assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
                 assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
-                        valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value()));
+                        valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(valuesMovAvg.value()));
             }
         }
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 7f61655a09273..9011b0b8dd69c 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -1359,7 +1359,7 @@ public void testCommonTermsTermVector() throws IOException {
     public void testPhrasePrefix() throws IOException {
         Builder builder = Settings.builder()
                 .put(indexSettings())
-                .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.synonym.tokenizer", "standard")
                 .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putList("index.analysis.filter.synonym.synonyms", "quick => fast");
@@ -2804,7 +2804,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception {
     public void testSynonyms() throws IOException {
         Builder builder = Settings.builder()
                 .put(indexSettings())
-                .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.synonym.tokenizer", "standard")
                 .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putList("index.analysis.filter.synonym.synonyms", "fast,quick");
.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase") .put("index.analysis.filter.synonym.type", "synonym") .putList("index.analysis.filter.synonym.synonyms", "quick => fast"); @@ -2804,7 +2804,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { public void testSynonyms() throws IOException { Builder builder = Settings.builder() .put(indexSettings()) - .put("index.analysis.analyzer.synonym.tokenizer", "whitespace") + .put("index.analysis.analyzer.synonym.tokenizer", "standard") .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase") .put("index.analysis.filter.synonym.type", "synonym") .putList("index.analysis.filter.synonym.synonyms", "fast,quick"); diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 58565b5f264b7..fe50aaf9b73d7 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -156,7 +156,7 @@ public void testRescorePhrase() throws Exception { public void testMoreDocs() throws Exception { Builder builder = Settings.builder(); - builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace"); + builder.put("index.analysis.analyzer.synonym.tokenizer", "standard"); builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase"); builder.put("index.analysis.filter.synonym.type", "synonym"); builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street"); @@ -234,7 +234,7 @@ public void testMoreDocs() throws Exception { // Tests a rescore window smaller than number of hits: public void testSmallRescoreWindow() throws Exception { Builder builder = Settings.builder(); - builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace"); + builder.put("index.analysis.analyzer.synonym.tokenizer", "standard"); builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase"); builder.put("index.analysis.filter.synonym.type", "synonym"); builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street"); @@ -306,7 +306,7 @@ public void testSmallRescoreWindow() throws Exception { // Tests a rescorer that penalizes the scores: public void testRescorerMadeScoresWorse() throws Exception { Builder builder = Settings.builder(); - builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace"); + builder.put("index.analysis.analyzer.synonym.tokenizer", "standard"); builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase"); builder.put("index.analysis.filter.synonym.type", "synonym"); builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street"); diff --git a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index fd619b69c9eff..c8d57b968568f 100644 --- a/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java @@ -82,7 +82,7 @@ public void init() throws Exception { .put("index.analysis.analyzer.perfect_match.tokenizer", "keyword") .put("index.analysis.analyzer.perfect_match.filter", "lowercase") .put("index.analysis.analyzer.category.type", "custom") - 
.put("index.analysis.analyzer.category.tokenizer", "whitespace") + .put("index.analysis.analyzer.category.tokenizer", "standard") .put("index.analysis.analyzer.category.filter", "lowercase") ); assertAcked(builder.addMapping("test", createMapping())); diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 2cab6e995b25c..7e1231f90592e 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.query; import org.apache.lucene.util.English; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -30,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; @@ -351,7 +349,7 @@ public void testCommonTermsQueryStackedTokens() throws Exception { .put(SETTING_NUMBER_OF_SHARDS,1) .put("index.analysis.filter.syns.type","synonym") .putList("index.analysis.filter.syns.synonyms","quick,fast") - .put("index.analysis.analyzer.syns.tokenizer","whitespace") + .put("index.analysis.analyzer.syns.tokenizer","standard") .put("index.analysis.analyzer.syns.filter","syns") ) .addMapping("type1", "field1", "type=text,analyzer=syns", "field2", "type=text,analyzer=syns")); @@ -1764,56 +1762,6 @@ public void testSearchEmptyDoc() { assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } - // see #5120 - public void testNGramCopyField() { - CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder() - .put(indexSettings()) - .put(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey(), 9) - .put("index.analysis.analyzer.my_ngram_analyzer.type", "custom") - .put("index.analysis.analyzer.my_ngram_analyzer.tokenizer", "my_ngram_tokenizer") - .put("index.analysis.tokenizer.my_ngram_tokenizer.type", "nGram") - .put("index.analysis.tokenizer.my_ngram_tokenizer.min_gram", "1") - .put("index.analysis.tokenizer.my_ngram_tokenizer.max_gram", "10") - .putList("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0])); - assertAcked(builder.addMapping("test", "origin", "type=text,copy_to=meta", "meta", "type=text,analyzer=my_ngram_analyzer")); - // we only have ngrams as the index analyzer so searches will get standard analyzer - - - client().prepareIndex("test", "test", "1").setSource("origin", "C.A1234.5678") - .setRefreshPolicy(IMMEDIATE) - .get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(matchQuery("meta", "1234")) - .get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(matchQuery("meta", "1234.56")) - .get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(termQuery("meta", "A1234")) - .get(); - assertHitCount(searchResponse, 1L); - - searchResponse = client().prepareSearch("test") - .setQuery(termQuery("meta", "a1234")) - .get(); - 
-        assertHitCount(searchResponse, 0L); // it's upper case
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "A1234").analyzer("my_ngram_analyzer"))
-            .get(); // force ngram analyzer
-        assertHitCount(searchResponse, 1L);
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "a1234").analyzer("my_ngram_analyzer"))
-            .get(); // this one returns a hit since it's default operator is OR
-        assertHitCount(searchResponse, 1L);
-    }
-
     public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedException {
         createIndex("test1");
         indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "Johnnie Walker Black Label"),
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
index feb15044438ec..677cc4163ccf7 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java
@@ -427,7 +427,7 @@ public void testSizeAndSort() throws Exception {
     public void testStopwordsOnlyPhraseSuggest() throws IOException {
         assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=text,analyzer=stopwd").setSettings(
                 Settings.builder()
-                        .put("index.analysis.analyzer.stopwd.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.stopwd.tokenizer", "standard")
                         .putList("index.analysis.analyzer.stopwd.filter", "stop")
         ));
         ensureGreen();
diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
index f6e5e6a85b4a5..055139eacf716 100644
--- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java
@@ -22,18 +22,10 @@
 import org.apache.lucene.analysis.util.CharFilterFactory;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
-import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
 import org.elasticsearch.index.analysis.HunspellTokenFilterFactory;
 import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
-import org.elasticsearch.index.analysis.LetterTokenizerFactory;
-import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
 import org.elasticsearch.index.analysis.MultiTermAwareComponent;
-import org.elasticsearch.index.analysis.NGramTokenizerFactory;
-import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
-import org.elasticsearch.index.analysis.PatternTokenizerFactory;
 import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
 import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
 import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
@@ -43,9 +35,6 @@
 import org.elasticsearch.index.analysis.StopTokenFilterFactory;
 import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory;
 import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
-import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
-import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
-import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
 import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.test.ESTestCase;
@@ -88,20 +77,20 @@ private static String toCamelCase(String s) {
     static final Map<String, Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String, Class<?>>()
             // exposed in ES
-            .put("classic", ClassicTokenizerFactory.class)
-            .put("edgengram", EdgeNGramTokenizerFactory.class)
+            .put("classic", MovedToAnalysisCommon.class)
+            .put("edgengram", MovedToAnalysisCommon.class)
             .put("keyword", KeywordTokenizerFactory.class)
-            .put("letter", LetterTokenizerFactory.class)
-            .put("lowercase", LowerCaseTokenizerFactory.class)
-            .put("ngram", NGramTokenizerFactory.class)
-            .put("pathhierarchy", PathHierarchyTokenizerFactory.class)
-            .put("pattern", PatternTokenizerFactory.class)
+            .put("letter", MovedToAnalysisCommon.class)
+            .put("lowercase", MovedToAnalysisCommon.class)
+            .put("ngram", MovedToAnalysisCommon.class)
+            .put("pathhierarchy", MovedToAnalysisCommon.class)
+            .put("pattern", MovedToAnalysisCommon.class)
             .put("simplepattern", MovedToAnalysisCommon.class)
             .put("simplepatternsplit", MovedToAnalysisCommon.class)
             .put("standard", StandardTokenizerFactory.class)
-            .put("thai", ThaiTokenizerFactory.class)
-            .put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
-            .put("whitespace", WhitespaceTokenizerFactory.class)
+            .put("thai", MovedToAnalysisCommon.class)
+            .put("uax29urlemail", MovedToAnalysisCommon.class)
+            .put("whitespace", MovedToAnalysisCommon.class)

             // this one "seems to mess up offsets". probably shouldn't be a tokenizer...
             .put("wikipedia", Void.class)
@@ -294,23 +283,8 @@ protected Map<String, Class<?>> getPreConfiguredTokenizers() {
         Map<String, Class<?>> tokenizers = new HashMap<>();
         // TODO drop this temporary shim when all the old style tokenizers have been migrated to new style
         for (PreBuiltTokenizers tokenizer : PreBuiltTokenizers.values()) {
-            final Class<?> luceneFactoryClazz;
-            switch (tokenizer) {
-            case UAX_URL_EMAIL:
-                luceneFactoryClazz = org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class;
-                break;
-            case PATH_HIERARCHY:
-                luceneFactoryClazz = Void.class;
-                break;
-            default:
-                luceneFactoryClazz = null;
-            }
-            tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), luceneFactoryClazz);
+            tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), null);
         }
-        // TODO drop aliases once they are moved to module
-        tokenizers.put("nGram", tokenizers.get("ngram"));
-        tokenizers.put("edgeNGram", tokenizers.get("edge_ngram"));
-        tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy"));
         return tokenizers;
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
index 8aff12edc8a53..743be6d1bcb01 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
@@ -29,6 +29,7 @@
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -149,7 +150,7 @@ public void testVerifyOverwriteFails() throws IOException {
         final BytesArray bytesArray = new BytesArray(data);
         writeBlob(container, blobName, bytesArray);
         // should not be able to overwrite existing blob
-        expectThrows(IOException.class, () -> writeBlob(container, blobName, bytesArray));
+        expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray));
         container.deleteBlob(blobName);
         writeBlob(container, blobName, bytesArray); // after deleting the previous blob, we should be able to write to it again
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index dfd3713333543..0a51fbdb8bd9b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -519,6 +519,19 @@ public static byte randomByte() {
         return (byte) random().nextInt();
     }

+    /**
+     * Helper method to create a byte array of a given length populated with random byte values
+     *
+     * @see #randomByte()
+     */
+    public static byte[] randomByteArrayOfLength(int size) {
+        byte[] bytes = new byte[size];
+        for (int i = 0; i < size; i++) {
+            bytes[i] = randomByte();
+        }
+        return bytes;
+    }
+
     public static short randomShort() {
         return (short) random().nextInt();
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
index 950bb14eed9af..ab99bc0d97ba4 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java
@@ -21,7 +21,10 @@
 import com.carrotsearch.randomizedtesting.RandomizedTest;
 import org.apache.http.HttpHost;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHeader;
 import org.elasticsearch.Version;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.client.RestClient;
@@ -29,6 +32,7 @@
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi;
 import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
@@ -38,6 +42,7 @@
 import org.elasticsearch.test.rest.yaml.section.ExecutableSection;
 import org.junit.AfterClass;
 import org.junit.Before;
+import org.junit.BeforeClass;

 import java.io.IOException;
 import java.nio.file.Files;
@@ -94,6 +99,13 @@ protected ESClientYamlSuiteTestCase(ClientYamlTestCandidate testCandidate) {
         this.testCandidate = testCandidate;
     }

+    private static boolean useDefaultNumberOfShards;
+
+    @BeforeClass
+    public static void initializeUseDefaultNumberOfShards() {
+        useDefaultNumberOfShards = usually();
+    }
+
     @Before
     public void initAndResetContext() throws Exception {
         if (restTestExecutionContext == null) {
@@ -318,6 +330,14 @@ public void test() throws IOException {
             throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]");
         }

+        if (useDefaultNumberOfShards == false
+                && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) {
+            final Request request = new Request("PUT", "/_template/global");
+            request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters()));
+            request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}"));
+            adminClient().performRequest(request);
+        }
+
         if (!testCandidate.getSetupSection().isEmpty()) {
             logger.debug("start setup test [{}]", testCandidate.getTestPath());
             for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java
index ab9be65514a96..d074dd82af7a6 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java
@@ -37,6 +37,7 @@ public final class Features {
     private static final List<String> SUPPORTED = unmodifiableList(Arrays.asList(
             "catch_unauthorized",
+            "default_shards",
             "embedded_stash_key",
             "headers",
             "stash_in_key",
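The yaml-runner changes close the loop on the new one-shard default: because many REST tests implicitly assumed multiple shards, the suite now randomly (via usually()) installs a catch-all template forcing two shards, and the new "default_shards" skip feature lets tests that manage their own shard counts opt out. A condensed sketch of the guard (installGlobalTemplate is a hypothetical helper standing in for the admin-client PUT /_template/global call shown above):

    // Once per suite JVM, randomly decide whether to exercise a multi-shard
    // layout; tests declaring the "default_shards" feature are left alone.
    if (useDefaultNumberOfShards == false
            && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) {
        installGlobalTemplate(); // hypothetical: forces index.number_of_shards: 2
    }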
- */ - Object esCheckoutPath = """${-> - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - esCheckoutDir.mkdirs() - NodeInfo.getShortPathName(esCheckoutDir.toString()) - } else { - esCheckoutDir.toString() - } - }""" - File xpackCheckoutDir = file("${esCheckoutDir}-extra/x-pack-elasticsearch") - Object xpackCheckoutPath = """${-> - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - xpackCheckoutDir.mkdirs() - NodeInfo.getShortPathName(xpackCheckoutDir.toString()) - } else { - xpackCheckoutDir.toString() - } - }""" - - final String remote = System.getProperty("tests.bwc.remote", "elastic") - - task createElasticsearchClone(type: LoggedExec) { - onlyIf { esCheckoutDir.exists() == false } - commandLine = ['git', 'clone', rootDir, esCheckoutPath] - } - - task createXPackClone(type: LoggedExec) { - onlyIf { xpackCheckoutDir.exists() == false } - commandLine = ['git', 'clone', xpackRootProject.projectDir, xpackCheckoutPath] - } - - // we use regular Exec here to ensure we always get output, regardless of logging level - task findElasticsearchRemote(type: Exec) { - dependsOn createElasticsearchClone - workingDir = esCheckoutDir - commandLine = ['git', 'remote', '-v'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.ext.esRemoteExists = false - output.toString('UTF-8').eachLine { - if (it.contains("${remote}\t")) { - project.ext.esRemoteExists = true - } - } - } - } - - task findXPackRemote(type: Exec) { - dependsOn createXPackClone - workingDir = xpackCheckoutDir - commandLine = ['git', 'remote', '-v'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.ext.xpackRemoteExists = false - output.toString('UTF-8').eachLine { - if (it.contains("${remote}\t")) { - project.ext.xpackRemoteExists = true - } - } - } - } - - task addElasticsearchRemote(type: LoggedExec) { - dependsOn findElasticsearchRemote - onlyIf { project.ext.esRemoteExists == false } - workingDir = esCheckoutDir - commandLine = ['git', 'remote', 'add', "${remote}", "git@github.com:${remote}/elasticsearch.git"] - } - - task addXPackRemote(type: LoggedExec) { - dependsOn findXPackRemote - onlyIf { project.ext.xpackRemoteExists == false } - workingDir = xpackCheckoutDir - commandLine = ['git', 'remote', 'add', "${remote}", "git@github.com:${remote}/x-pack-elasticsearch.git"] - } - - task fetchElasticsearchLatest(type: LoggedExec) { - dependsOn addElasticsearchRemote - workingDir = esCheckoutDir - commandLine = ['git', 'fetch', '--all'] - } - - task fetchXPackLatest(type: LoggedExec) { - dependsOn addXPackRemote - workingDir = xpackCheckoutDir - commandLine = ['git', 'fetch', '--all'] - } - - String esBuildMetadataKey = "bwc_refspec_${project.path.substring(1)}_elasticsearch" - task checkoutElasticsearchBwcBranch(type: LoggedExec) { - dependsOn fetchElasticsearchLatest - def String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(esBuildMetadataKey, "${remote}/${bwcBranch}")) - workingDir = esCheckoutDir - commandLine = ['git', 'checkout', refspec] - } - - String xpackBuildMetadataKey = "bwc_refspec_${project.path.substring(1)}_xpack" - task checkoutXPackBwcBranch(type: LoggedExec) { - dependsOn 
fetchXPackLatest - def String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(xpackBuildMetadataKey, "${remote}/${bwcBranch}")) - workingDir = xpackCheckoutDir - commandLine = ['git', 'checkout', refspec] - } - - File esBuildMetadataFile = project.file("build/${project.name}_elasticsearch/build_metadata") - task writeElasticsearchBuildMetadata(type: LoggedExec) { - dependsOn checkoutElasticsearchBwcBranch - workingDir = esCheckoutDir - commandLine = ['git', 'rev-parse', 'HEAD'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.mkdir(esBuildMetadataFile.parent) - esBuildMetadataFile.setText("${esBuildMetadataKey}=${output.toString('UTF-8')}", 'UTF-8') - } - } - - File xpackBuildMetadataFile = project.file("build/${project.name}_xpack/build_metadata") - task writeXPackBuildMetadata(type: LoggedExec) { - dependsOn checkoutXPackBwcBranch - workingDir = xpackCheckoutDir - commandLine = ['git', 'rev-parse', 'HEAD'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.mkdir(xpackBuildMetadataFile.parent) - xpackBuildMetadataFile.setText("${xpackBuildMetadataKey}=${output.toString('UTF-8')}", 'UTF-8') - } - } - - File bwcZip = file("${xpackCheckoutDir}/plugin/build/distributions/x-pack-${bwcVersion}.zip") - task buildBwcVersion(type: Exec) { - dependsOn checkoutXPackBwcBranch, checkoutElasticsearchBwcBranch, writeElasticsearchBuildMetadata, writeXPackBuildMetadata - workingDir = xpackCheckoutDir - if (["5.6", "6.0", "6.1"].contains(bwcBranch)) { - // we are building branches that are officially built with JDK 8, push JAVA8_HOME to JAVA_HOME for these builds - environment('JAVA_HOME', getJavaHome(it, 8)) - } else if ("6.2".equals(bwcBranch)) { - environment('JAVA_HOME', getJavaHome(it, 9)) - } else { - environment('JAVA_HOME', project.compilerJavaHome) - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable 'cmd' - args '/C', 'call', new File(xpackCheckoutDir, 'gradlew').toString() - } else { - executable new File(xpackCheckoutDir, 'gradlew').toString() - } - args ":x-pack-elasticsearch:plugin:assemble", "-Dbuild.snapshot=true" - final LogLevel logLevel = gradle.startParameter.logLevel - if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" - } - final String showStacktraceName = gradle.startParameter.showStacktrace.name() - assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) - if (showStacktraceName.equals("ALWAYS")) { - args "--stacktrace" - } else if (showStacktraceName.equals("ALWAYS_FULL")) { - args "--full-stacktrace" - } - } - - artifacts { - 'default' file: bwcZip, name: 'x-pack', type: 'zip', builtBy: buildBwcVersion - } -} diff --git a/x-pack/plugin/bwc/maintenance-bugfix-snapshot/build.gradle b/x-pack/plugin/bwc/maintenance-bugfix-snapshot/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/plugin/bwc/next-bugfix-snapshot/build.gradle b/x-pack/plugin/bwc/next-bugfix-snapshot/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git 
a/x-pack/plugin/bwc/next-minor-snapshot/build.gradle b/x-pack/plugin/bwc/next-minor-snapshot/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/plugin/bwc/staged-minor-snapshot/build.gradle b/x-pack/plugin/bwc/staged-minor-snapshot/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java index f7998a52d496a..1fb387b0b6c2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateJobAction.java @@ -45,7 +45,7 @@ public PutJobAction.Response newResponse() { public static class Request extends AcknowledgedRequest implements ToXContentObject { public static UpdateJobAction.Request parseRequest(String jobId, XContentParser parser) { - JobUpdate update = JobUpdate.PARSER.apply(parser, null).setJobId(jobId).build(); + JobUpdate update = JobUpdate.EXTERNAL_PARSER.apply(parser, null).setJobId(jobId).build(); return new UpdateJobAction.Request(jobId, update); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 8644254b92162..2c7ee538485b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -30,26 +30,35 @@ public class JobUpdate implements Writeable, ToXContentObject { public static final ParseField DETECTORS = new ParseField("detectors"); - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + // For internal updates + static final ConstructingObjectParser INTERNAL_PARSER = new ConstructingObjectParser<>( + "job_update", args -> new Builder((String) args[0])); + + // For parsing REST requests + public static final ConstructingObjectParser EXTERNAL_PARSER = new ConstructingObjectParser<>( "job_update", args -> new Builder((String) args[0])); static { - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); - PARSER.declareStringArray(Builder::setGroups, Job.GROUPS); - PARSER.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); - PARSER.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); - PARSER.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.CONFIG_PARSER, Job.MODEL_PLOT_CONFIG); - PARSER.declareObject(Builder::setAnalysisLimits, AnalysisLimits.CONFIG_PARSER, Job.ANALYSIS_LIMITS); - PARSER.declareString((builder, val) -> builder.setBackgroundPersistInterval( - TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL); - PARSER.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); - PARSER.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); - PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); - PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); - PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); - 
PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); - PARSER.declareString(Builder::setModelSnapshotMinVersion, Job.MODEL_SNAPSHOT_MIN_VERSION); - PARSER.declareLong(Builder::setEstablishedModelMemory, Job.ESTABLISHED_MODEL_MEMORY); + for (ConstructingObjectParser parser : Arrays.asList(INTERNAL_PARSER, EXTERNAL_PARSER)) { + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), Job.ID); + parser.declareStringArray(Builder::setGroups, Job.GROUPS); + parser.declareStringOrNull(Builder::setDescription, Job.DESCRIPTION); + parser.declareObjectArray(Builder::setDetectorUpdates, DetectorUpdate.PARSER, DETECTORS); + parser.declareObject(Builder::setModelPlotConfig, ModelPlotConfig.CONFIG_PARSER, Job.MODEL_PLOT_CONFIG); + parser.declareObject(Builder::setAnalysisLimits, AnalysisLimits.CONFIG_PARSER, Job.ANALYSIS_LIMITS); + parser.declareString((builder, val) -> builder.setBackgroundPersistInterval( + TimeValue.parseTimeValue(val, Job.BACKGROUND_PERSIST_INTERVAL.getPreferredName())), Job.BACKGROUND_PERSIST_INTERVAL); + parser.declareLong(Builder::setRenormalizationWindowDays, Job.RENORMALIZATION_WINDOW_DAYS); + parser.declareLong(Builder::setResultsRetentionDays, Job.RESULTS_RETENTION_DAYS); + parser.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS); + parser.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS); + parser.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT); + } + // These fields should not be set by a REST request + INTERNAL_PARSER.declareString(Builder::setModelSnapshotId, Job.MODEL_SNAPSHOT_ID); + INTERNAL_PARSER.declareLong(Builder::setEstablishedModelMemory, Job.ESTABLISHED_MODEL_MEMORY); + INTERNAL_PARSER.declareString(Builder::setModelSnapshotMinVersion, Job.MODEL_SNAPSHOT_MIN_VERSION); + INTERNAL_PARSER.declareString(Builder::setJobVersion, Job.JOB_VERSION); } private final String jobId; @@ -67,6 +76,7 @@ public class JobUpdate implements Writeable, ToXContentObject { private final String modelSnapshotId; private final Version modelSnapshotMinVersion; private final Long establishedModelMemory; + private final Version jobVersion; private JobUpdate(String jobId, @Nullable List groups, @Nullable String description, @Nullable List detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig, @@ -74,7 +84,8 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String @Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays, @Nullable Long modelSnapshotRetentionDays, @Nullable List categorisationFilters, @Nullable Map customSettings, @Nullable String modelSnapshotId, - @Nullable Version modelSnapshotMinVersion, @Nullable Long establishedModelMemory) { + @Nullable Version modelSnapshotMinVersion, @Nullable Long establishedModelMemory, + @Nullable Version jobVersion) { this.jobId = jobId; this.groups = groups; this.description = description; @@ -90,6 +101,7 @@ private JobUpdate(String jobId, @Nullable List groups, @Nullable String this.modelSnapshotId = modelSnapshotId; this.modelSnapshotMinVersion = modelSnapshotMinVersion; this.establishedModelMemory = establishedModelMemory; + this.jobVersion = jobVersion; } public JobUpdate(StreamInput in) throws IOException { @@ -119,16 +131,21 @@ public JobUpdate(StreamInput in) throws IOException { } customSettings = in.readMap(); modelSnapshotId = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_3_0) && 
in.readBoolean()) { - modelSnapshotMinVersion = Version.readVersion(in); - } else { - modelSnapshotMinVersion = null; - } if (in.getVersion().onOrAfter(Version.V_6_1_0)) { establishedModelMemory = in.readOptionalLong(); } else { establishedModelMemory = null; } + if (in.getVersion().onOrAfter(Version.V_6_3_0) && in.readBoolean()) { + jobVersion = Version.readVersion(in); + } else { + jobVersion = null; + } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1) && in.readBoolean()) { + modelSnapshotMinVersion = Version.readVersion(in); + } else { + modelSnapshotMinVersion = null; + } } @Override @@ -155,7 +172,18 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap(customSettings); out.writeOptionalString(modelSnapshotId); + if (out.getVersion().onOrAfter(Version.V_6_1_0)) { + out.writeOptionalLong(establishedModelMemory); + } if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + if (jobVersion != null) { + out.writeBoolean(true); + Version.writeVersion(jobVersion, out); + } else { + out.writeBoolean(false); + } + } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { if (modelSnapshotMinVersion != null) { out.writeBoolean(true); Version.writeVersion(modelSnapshotMinVersion, out); @@ -163,9 +191,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } - if (out.getVersion().onOrAfter(Version.V_6_1_0)) { - out.writeOptionalLong(establishedModelMemory); - } } public String getJobId() { @@ -228,6 +253,10 @@ public Long getEstablishedModelMemory() { return establishedModelMemory; } + public Version getJobVersion() { + return jobVersion; + } + public boolean isAutodetectProcessUpdate() { return modelPlotConfig != null || detectorUpdates != null; } @@ -278,6 +307,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (establishedModelMemory != null) { builder.field(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName(), establishedModelMemory); } + if (jobVersion != null) { + builder.field(Job.JOB_VERSION.getPreferredName(), jobVersion); + } builder.endObject(); return builder; } @@ -326,13 +358,16 @@ public Set getUpdateFields() { if (establishedModelMemory != null) { updateFields.add(Job.ESTABLISHED_MODEL_MEMORY.getPreferredName()); } + if (jobVersion != null) { + updateFields.add(Job.JOB_VERSION.getPreferredName()); + } return updateFields; } /** * Updates {@code source} with the new values in this object returning a new {@link Job}. * - * @param source Source job to be updated + * @param source Source job to be updated * @param maxModelMemoryLimit The maximum model memory allowed * @return A new job equivalent to {@code source} updated. 
*/ @@ -408,6 +443,9 @@ public Job mergeWithJob(Job source, ByteSizeValue maxModelMemoryLimit) { builder.setEstablishedModelMemory(null); } } + if (jobVersion != null) { + builder.setJobVersion(jobVersion); + } return builder.build(); } @@ -437,14 +475,15 @@ public boolean equals(Object other) { && Objects.equals(this.customSettings, that.customSettings) && Objects.equals(this.modelSnapshotId, that.modelSnapshotId) && Objects.equals(this.modelSnapshotMinVersion, that.modelSnapshotMinVersion) - && Objects.equals(this.establishedModelMemory, that.establishedModelMemory); + && Objects.equals(this.establishedModelMemory, that.establishedModelMemory) + && Objects.equals(this.jobVersion, that.jobVersion); } @Override public int hashCode() { return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays, backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings, - modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory); + modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory, jobVersion); } public static class DetectorUpdate implements Writeable, ToXContentObject { @@ -555,6 +594,7 @@ public static class Builder { private String modelSnapshotId; private Version modelSnapshotMinVersion; private Long establishedModelMemory; + private Version jobVersion; public Builder(String jobId) { this.jobId = jobId; @@ -640,10 +680,20 @@ public Builder setEstablishedModelMemory(Long establishedModelMemory) { return this; } + public Builder setJobVersion(Version version) { + this.jobVersion = version; + return this; + } + + public Builder setJobVersion(String version) { + this.jobVersion = Version.fromString(version); + return this; + } + public JobUpdate build() { return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval, renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings, - modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory); + modelSnapshotId, modelSnapshotMinVersion, establishedModelMemory, jobVersion); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java index 3b8ea2910d13d..6bd6228f2efe1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetaData.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -74,13 +75,13 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; TokenMetaData that = (TokenMetaData)o; - return keys.equals(that.keys) && currentKeyHash.equals(that.currentKeyHash); + return keys.equals(that.keys) && Arrays.equals(currentKeyHash, that.currentKeyHash); } @Override public int hashCode() { int result = keys.hashCode(); - result = 31 * result + currentKeyHash.hashCode(); + result = 31 * result + Arrays.hashCode(currentKeyHash); return result; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java index 7a976c89cdb40..3663ff14e6302 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdateTests.java @@ -26,6 +26,8 @@ public class JobUpdateTests extends AbstractSerializingTestCase { + private boolean useInternalParser = randomBoolean(); + @Override protected JobUpdate createTestInstance() { JobUpdate.Builder update = new JobUpdate.Builder(randomAlphaOfLength(4)); @@ -84,15 +86,18 @@ protected JobUpdate createTestInstance() { if (randomBoolean()) { update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10))); } - if (randomBoolean()) { + if (useInternalParser && randomBoolean()) { update.setModelSnapshotId(randomAlphaOfLength(10)); } - if (randomBoolean()) { + if (useInternalParser && randomBoolean()) { update.setModelSnapshotMinVersion(Version.CURRENT); } - if (randomBoolean()) { + if (useInternalParser && randomBoolean()) { update.setEstablishedModelMemory(randomNonNegativeLong()); } + if (useInternalParser && randomBoolean()) { + update.setJobVersion(randomFrom(Version.CURRENT, Version.V_6_2_0, Version.V_6_1_0)); + } return update.build(); } @@ -104,7 +109,11 @@ protected Writeable.Reader instanceReader() { @Override protected JobUpdate doParseInstance(XContentParser parser) { - return JobUpdate.PARSER.apply(parser, null).build(); + if (useInternalParser) { + return JobUpdate.INTERNAL_PARSER.apply(parser, null).build(); + } else { + return JobUpdate.EXTERNAL_PARSER.apply(parser, null).build(); + } } public void testMergeWithJob() { @@ -137,6 +146,7 @@ public void testMergeWithJob() { updateBuilder.setCategorizationFilters(categorizationFilters); updateBuilder.setCustomSettings(customSettings); updateBuilder.setModelSnapshotId(randomAlphaOfLength(10)); + updateBuilder.setJobVersion(Version.V_6_1_0); JobUpdate update = updateBuilder.build(); Job.Builder jobBuilder = new Job.Builder("foo"); @@ -164,6 +174,7 @@ public void testMergeWithJob() { assertEquals(update.getCategorizationFilters(), updatedJob.getAnalysisConfig().getCategorizationFilters()); assertEquals(update.getCustomSettings(), updatedJob.getCustomSettings()); assertEquals(update.getModelSnapshotId(), updatedJob.getModelSnapshotId()); + assertEquals(update.getJobVersion(), updatedJob.getJobVersion()); for (JobUpdate.DetectorUpdate detectorUpdate : update.getDetectorUpdates()) { assertNotNull(updatedJob.getAnalysisConfig().getDetectors().get(detectorUpdate.getDetectorIndex()).getDetectorDescription()); assertEquals(detectorUpdate.getDescription(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/TokenMetaDataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/TokenMetaDataTests.java new file mode 100644 index 0000000000000..77f7c4dd3ad04 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/TokenMetaDataTests.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.security.authc; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class TokenMetaDataTests extends ESTestCase { + + public void testEqualsAndHashCode() { + final int numKeyAndTimestamps = scaledRandomIntBetween(1, 8); + final List keyAndTimestampList = generateKeyAndTimestampListOfSize(numKeyAndTimestamps); + final byte[] currentKeyHash = randomByteArrayOfLength(8); + final TokenMetaData original = new TokenMetaData(keyAndTimestampList, currentKeyHash); + + EqualsHashCodeTestUtils.checkEqualsAndHashCode(original, tokenMetaData -> { + final List copiedList = new ArrayList<>(keyAndTimestampList); + final byte[] copyKeyHash = Arrays.copyOf(currentKeyHash, currentKeyHash.length); + return new TokenMetaData(copiedList, copyKeyHash); + }, tokenMetaData -> { + final List modifiedList = generateKeyAndTimestampListOfSize(numKeyAndTimestamps); + return new TokenMetaData(modifiedList, currentKeyHash); + }); + + EqualsHashCodeTestUtils.checkEqualsAndHashCode(original, tokenMetaData -> { + BytesStreamOutput out = new BytesStreamOutput(); + tokenMetaData.writeTo(out); + return new TokenMetaData(out.bytes().streamInput()); + }, tokenMetaData -> { + final byte[] modifiedKeyHash = randomByteArrayOfLength(8); + return new TokenMetaData(keyAndTimestampList, modifiedKeyHash); + }); + } + + private List generateKeyAndTimestampListOfSize(int size) { + final List keyAndTimestampList = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + keyAndTimestampList.add( + new KeyAndTimestamp(new SecureString(randomAlphaOfLengthBetween(1, 12).toCharArray()), randomNonNegativeLong())); + } + return keyAndTimestampList; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java index 75b7ea1e5934f..ec62901d65a6e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java @@ -91,10 +91,10 @@ private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, in } void persist(String jobId, BytesReference bytes) throws IOException { - logger.trace("[{}] ES API CALL: bulk index", jobId); BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON); if (bulkRequest.numberOfActions() > 0) { + logger.trace("[{}] Persisting job state document", jobId); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { client.bulk(bulkRequest).actionGet(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java index 1534b78899f8b..0ead753a4461c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/BytesKey.java @@ -14,7 +14,7 @@ * Simple wrapper around bytes 
so that it can be used as a cache key. The hashCode is computed * once upon creation and cached. */ -public class BytesKey { +public final class BytesKey { final byte[] bytes; private final int hashCode; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java index 93d222fc791fd..5773bf5a44861 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java @@ -80,7 +80,7 @@ public int usersCount() { } public AuthenticationResult verifyPassword(String username, SecureString password, java.util.function.Supplier user) { - char[] hash = users.get(username); + final char[] hash = users.get(username); if (hash == null) { return AuthenticationResult.notHandled(); } @@ -91,7 +91,7 @@ public AuthenticationResult verifyPassword(String username, SecureString passwor } public boolean userExists(String username) { - return users != null && users.containsKey(username); + return users.containsKey(username); } public static Path resolveFile(Environment env) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java index 1631fef60ea89..e17d8c5c7ecfa 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserRolesStore.java @@ -75,11 +75,8 @@ int entriesCount() { } public String[] roles(String username) { - if (userRoles == null) { - return Strings.EMPTY_ARRAY; - } - String[] roles = userRoles.get(username); - return roles == null ? Strings.EMPTY_ARRAY : userRoles.get(username); + final String[] roles = userRoles.get(username); + return roles == null ? 
Strings.EMPTY_ARRAY : roles; } public static Path resolveFile(Environment env) { @@ -160,11 +157,7 @@ public static Map parseFile(Path path, @Nullable Logger logger } for (String user : roleUsers) { - List roles = userToRoles.get(user); - if (roles == null) { - roles = new ArrayList<>(); - userToRoles.put(user, roles); - } + List roles = userToRoles.computeIfAbsent(user, k -> new ArrayList<>()); roles.add(role); } } @@ -185,11 +178,7 @@ public static void writeFile(Map userToRoles, Path path) { HashMap> roleToUsers = new HashMap<>(); for (Map.Entry entry : userToRoles.entrySet()) { for (String role : entry.getValue()) { - List users = roleToUsers.get(role); - if (users == null) { - users = new ArrayList<>(); - roleToUsers.put(role, users); - } + List users = roleToUsers.computeIfAbsent(role, k -> new ArrayList<>()); users.add(entry.getKey()); } } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index 95c9ade5e295d..c024af48187d3 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -25,8 +25,8 @@ public enum DataType { SHORT( JDBCType.SMALLINT, Short.class, Short.BYTES, 5, 6, true, false, true), INTEGER( JDBCType.INTEGER, Integer.class, Integer.BYTES, 10, 11, true, false, true), LONG( JDBCType.BIGINT, Long.class, Long.BYTES, 19, 20, true, false, true), - // 53 bits defaultPrecision ~ 16(15.95) decimal digits (53log10(2)), - DOUBLE( JDBCType.DOUBLE, Double.class, Double.BYTES, 16, 25, false, true, true), + // 53 bits defaultPrecision ~ 15 (15.95) decimal digits (53*log10(2)) + DOUBLE( JDBCType.DOUBLE, Double.class, Double.BYTES, 15, 25, false, true, true), // 24 bits defaultPrecision - 24*log10(2) =~ 7 (7.22) FLOAT( JDBCType.REAL, Float.class, Float.BYTES, 7, 15, false, true, true), HALF_FLOAT( JDBCType.FLOAT, Double.class, Double.BYTES, 16, 25, false, true, true), @@ -37,7 +37,10 @@ public enum DataType { OBJECT( JDBCType.STRUCT, null, -1, 0, 0), NESTED( JDBCType.STRUCT, null, -1, 0, 0), BINARY( JDBCType.VARBINARY, byte[].class, -1, Integer.MAX_VALUE, 0), - DATE( JDBCType.TIMESTAMP, Timestamp.class, Long.BYTES, 19, 20); + // since ODBC and JDBC interpret precision for Date as display size, + // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) + // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 + DATE( JDBCType.TIMESTAMP, Timestamp.class, Long.BYTES, 24, 24); // @formatter:on private static final Map jdbcToEs; @@ -75,7 +78,7 @@ public enum DataType { *

* Specified column size. For numeric data, this is the maximum precision. For character * data, this is the length in characters. For datetime datatypes, this is the length in characters of the - * String representation (assuming the maximum allowed defaultPrecision of the fractional seconds component). + * String representation (assuming the maximum allowed defaultPrecision of the fractional seconds component, i.e. milliseconds). */ public final int defaultPrecision; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index fb08d08fcb926..bf432a7236357 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -148,6 +148,7 @@ public Object visitSysCatalogs(SysCatalogsContext ctx) { @Override public SysTables visitSysTables(SysTablesContext ctx) { List types = new ArrayList<>(); + boolean legacyTableType = false; for (StringContext string : ctx.string()) { String value = string(string); if (value != null) { @@ -156,6 +157,12 @@ public SysTables visitSysTables(SysTablesContext ctx) { // since % is the same as not specifying a value, choose // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017 // that is skip the value + } + // special case for legacy apps (like msquery) that always ask for 'TABLE', + // which we manually map to all supported concrete tables + else if (value.toUpperCase(Locale.ROOT).equals("TABLE")) { + legacyTableType = true; + types.add(IndexType.INDEX); } else { IndexType type = IndexType.from(value); types.add(type); @@ -165,7 +172,7 @@ public SysTables visitSysTables(SysTablesContext ctx) { // if the ODBC enumeration is specified, skip validation EnumSet set = types.isEmpty() ?
null : EnumSet.copyOf(types); - return new SysTables(source(ctx), visitPattern(ctx.clusterPattern), visitPattern(ctx.tablePattern), set); + return new SysTables(source(ctx), visitPattern(ctx.clusterPattern), visitPattern(ctx.tablePattern), set, legacyTableType); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 3c01736cebe89..8005ce0758981 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -17,6 +17,7 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; import java.sql.DatabaseMetaData; @@ -29,7 +30,6 @@ import static java.util.Arrays.asList; import static org.elasticsearch.xpack.sql.type.DataType.INTEGER; -import static org.elasticsearch.xpack.sql.type.DataType.NULL; import static org.elasticsearch.xpack.sql.type.DataType.SHORT; /** @@ -133,11 +133,7 @@ static void fillInRows(String clusterName, String indexName, Map output() { @Override public final void execute(SqlSession session, ActionListener listener) { listener.onResponse(Rows.of(output(), IndexType.VALID.stream() + // *DBC requires ascending order + .sorted(Comparator.comparing(t -> t.toSql())) .map(t -> singletonList(t.toSql())) .collect(toList()))); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index 2b8e5e8527c31..eb6f6a36b5528 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.plan.logical.command.sys; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo; import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType; import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.regex.LikePattern; @@ -18,6 +19,7 @@ import org.elasticsearch.xpack.sql.util.CollectionUtils; import java.util.ArrayList; +import java.util.Comparator; import java.util.EnumSet; import java.util.List; import java.util.Objects; @@ -33,17 +35,21 @@ public class SysTables extends Command { private final LikePattern pattern; private final LikePattern clusterPattern; private final EnumSet types; + // flag indicating whether tables are reported as `TABLE` or `BASE TABLE` + private final boolean legacyTableTypes; - public SysTables(Location location, LikePattern clusterPattern, LikePattern pattern, EnumSet types) { + public SysTables(Location location, LikePattern clusterPattern, LikePattern pattern, EnumSet types, + boolean legacyTableTypes) { super(location); this.clusterPattern = clusterPattern; this.pattern = pattern; this.types = types; + this.legacyTableTypes = legacyTableTypes; } @Override protected NodeInfo info() { - return NodeInfo.create(this, SysTables::new, clusterPattern, pattern, types); + 
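// carry legacyTableTypes in the node info so that copies made during tree transformations keep the flag +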
return NodeInfo.create(this, SysTables::new, clusterPattern, pattern, types, legacyTableTypes); } @Override @@ -89,6 +95,8 @@ public final void execute(SqlSession session, ActionListener liste enumeration[3] = type.toSql(); values.add(asList(enumeration)); } + + values.sort(Comparator.comparing(l -> l.get(3).toString())); listener.onResponse(Rows.of(output(), values)); return; } @@ -108,10 +116,13 @@ public final void execute(SqlSession session, ActionListener liste session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse( Rows.of(output(), result.stream() + // sort by type (which might be legacy), then by name + .sorted(Comparator. comparing(i -> legacyName(i.type())) + .thenComparing(Comparator.comparing(i -> i.name()))) .map(t -> asList(cluster, EMPTY, t.name(), - t.type().toSql(), + legacyName(t.type()), EMPTY, null, null, @@ -122,6 +133,10 @@ public final void execute(SqlSession session, ActionListener liste , listener::onFailure)); } + private String legacyName(IndexType indexType) { + return legacyTableTypes && indexType == IndexType.INDEX ? "TABLE" : indexType.toSql(); + } + @Override public int hashCode() { return Objects.hash(clusterPattern, pattern, types); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java index 508ffef530573..ab40b076fac85 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.type.DataTypes; import java.sql.DatabaseMetaData; import java.util.Comparator; @@ -67,9 +68,10 @@ public List output() { public final void execute(SqlSession session, ActionListener listener) { List> rows = Stream.of(DataType.values()) // sort by SQL int type (that's what the JDBC/ODBC specs want) - .sorted(Comparator.comparing(t -> t.jdbcType)) + .sorted(Comparator.comparing(t -> t.jdbcType.getVendorTypeNumber())) .map(t -> asList(t.esType.toUpperCase(Locale.ROOT), t.jdbcType.getVendorTypeNumber(), + //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size?view=sql-server-2017 t.defaultPrecision, "'", "'", @@ -83,16 +85,17 @@ public final void execute(SqlSession session, ActionListener liste // only numerics are signed !t.isSigned(), //no fixed precision scale SQL_FALSE - false, - null, - null, - null, + Boolean.FALSE, + // not auto-incremented + Boolean.FALSE, null, + DataTypes.metaSqlMinimumScale(t), + DataTypes.metaSqlMaximumScale(t), // SQL_DATA_TYPE - ODBC wants this to be not null - 0, - null, + DataTypes.metaSqlDataType(t), + DataTypes.metaSqlDateTimeSub(t), // Radix - t.isInteger ? Integer.valueOf(10) : (t.isRational ? 
Integer.valueOf(2) : null), + DataTypes.metaSqlRadix(t), null )) .collect(toList()); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index c0f4947bb88b3..c0bc9b6e52908 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -31,7 +31,7 @@ */ public abstract class DataTypeConversion { - private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateTimeNoMillis().withZoneUTC(); + private static final DateTimeFormatter UTC_DATE_FORMATTER = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC(); /** * Returns the type compatible with both left and right types diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index c2b40656ba294..6fc7f95bef71e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -51,4 +51,71 @@ public static DataType fromJava(Object value) { } throw new SqlIllegalArgumentException("No idea what's the DataType for {}", value.getClass()); } -} + + // + // Metadata methods, mainly for ODBC. + // As these are fairly obscure and limited in use, there is no point in promoting them to full type methods; + // hence they appear here as utility methods. + // + + // https://docs.microsoft.com/en-us/sql/relational-databases/native-client-odbc-date-time/metadata-catalog + // https://github.com/elastic/elasticsearch/issues/30386 + public static Integer metaSqlDataType(DataType t) { + if (t == DataType.DATE) { + // ODBC SQL_DATETIME + return Integer.valueOf(9); + } + // this is safe since the vendor SQL types are short despite the return value + return t.jdbcType.getVendorTypeNumber(); + } + + // https://github.com/elastic/elasticsearch/issues/30386 + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + public static Integer metaSqlDateTimeSub(DataType t) { + if (t == DataType.DATE) { + // ODBC SQL_CODE_TIMESTAMP + return Integer.valueOf(3); + } + // ODBC null + return 0; + } + + // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/decimal-digits?view=sql-server-2017 + public static Short metaSqlMinimumScale(DataType t) { + // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) + if (t == DataType.DATE) { + return Short.valueOf((short) 3); + } + if (t.isInteger) { + return Short.valueOf((short) 0); + } + // minimum scale? + if (t.isRational) { + return Short.valueOf((short) 0); + } + return null; + } + + public static Short metaSqlMaximumScale(DataType t) { + // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) + if (t == DataType.DATE) { + return Short.valueOf((short) 3); + } + if (t.isInteger) { + return Short.valueOf((short) 0); + } + if (t.isRational) { + return Short.valueOf((short) t.defaultPrecision); + } + return null; + } + + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + public static Integer metaSqlRadix(DataType t) { + // RADIX - Determines how numbers returned by COLUMN_SIZE and DECIMAL_DIGITS should be interpreted. 
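+ // (reported as NUM_PREC_RADIX in the SQLGetTypeInfo result set linked above)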
+ // 10 means they represent the number of decimal digits allowed for the column. + // 2 means they represent the number of bits allowed for the column. + // null means radix is not applicable for the given type. + return t.isInteger ? Integer.valueOf(10) : (t.isRational ? Integer.valueOf(2) : null); + } +} \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java index b9737fbba608f..04926db5407f5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DateEsField.java @@ -25,13 +25,6 @@ public DateEsField(String name, Map properties, boolean hasDocV this.formats = CollectionUtils.isEmpty(formats) ? DEFAULT_FORMAT : Arrays.asList(formats); } - @Override - public int getPrecision() { - // same as Long - // TODO: based this on format string - return 19; - } - public List getFormats() { return formats; } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index bddddc6941cbb..0b82530022386 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -38,6 +38,13 @@ public void testSysColumns() { assertEquals(null, radix(row)); assertEquals(Integer.MAX_VALUE, bufferLength(row)); + row = rows.get(4); + assertEquals("date", name(row)); + assertEquals(Types.TIMESTAMP, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(24, precision(row)); + assertEquals(8, bufferLength(row)); + row = rows.get(7); assertEquals("some.dotted", name(row)); assertEquals(Types.STRUCT, sqlType(row)); @@ -59,6 +66,10 @@ private static Object sqlType(List list) { return list.get(4); } + private static Object precision(List list) { + return list.get(6); + } + private static Object bufferLength(List list) { return list.get(7); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java index ac72bcba4d647..27ed27413110f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java @@ -57,8 +57,8 @@ private Tuple sql(String sql) { public void testSysTypes() throws Exception { Command cmd = sql("SYS TYPES").v1(); - List names = asList("BYTE", "SHORT", "INTEGER", "LONG", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", "KEYWORD", "TEXT", - "DATE", "BINARY", "NULL", "UNSUPPORTED", "OBJECT", "NESTED", "BOOLEAN"); + List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", + "KEYWORD", "TEXT", "BOOLEAN", "DATE", "UNSUPPORTED", "OBJECT", "NESTED"); cmd.execute(null, ActionListener.wrap(r -> { assertEquals(19, r.columnCount()); @@ -68,6 +68,8 @@ public void testSysTypes() throws Exception { assertFalse(r.column(9, Boolean.class)); // make sure precision is returned as boolean (not int) assertFalse(r.column(10, Boolean.class)); + // 
no auto-increment + assertFalse(r.column(11, Boolean.class)); for (int i = 0; i < r.size(); i++) { assertEquals(names.get(i), r.column(0)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java index 956273b9aae2d..291f9ee244e5f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTableTypesTests.java @@ -41,9 +41,9 @@ public void testSysCatalogs() throws Exception { sql.v1().execute(sql.v2(), ActionListener.wrap(r -> { assertEquals(2, r.size()); - assertEquals("BASE TABLE", r.column(0)); - r.advanceRow(); assertEquals("ALIAS", r.column(0)); + r.advanceRow(); + assertEquals("BASE TABLE", r.column(0)); }, ex -> fail(ex.getMessage()))); } } \ No newline at end of file diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index fe36095641a60..c08c423be34eb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.sql.type.EsField; import org.elasticsearch.xpack.sql.type.TypesTests; +import java.util.Comparator; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; @@ -57,30 +58,30 @@ public void testSysTablesDifferentCatalog() throws Exception { public void testSysTablesNoTypes() throws Exception { executeCommand("SYS TABLES", r -> { + assertEquals("alias", r.column(2)); + assertTrue(r.advanceRow()); assertEquals(2, r.size()); assertEquals("test", r.column(2)); - assertTrue(r.advanceRow()); - assertEquals("alias", r.column(2)); }, index, alias); } public void testSysTablesPattern() throws Exception { executeCommand("SYS TABLES LIKE '%'", r -> { + assertEquals("alias", r.column(2)); + assertTrue(r.advanceRow()); assertEquals(2, r.size()); assertEquals("test", r.column(2)); - assertTrue(r.advanceRow()); - assertEquals("alias", r.column(2)); }, index, alias); } public void testSysTablesPatternParameterized() throws Exception { List params = asList(param("%")); executeCommand("SYS TABLES LIKE ?", params, r -> { + assertEquals("alias", r.column(2)); + assertTrue(r.advanceRow()); assertEquals(2, r.size()); assertEquals("test", r.column(2)); - assertTrue(r.advanceRow()); - assertEquals("alias", r.column(2)); - }, index, alias); + }, alias, index); } public void testSysTablesOnlyAliases() throws Exception { @@ -105,6 +106,23 @@ public void testSysTablesOnlyIndices() throws Exception { }, index); } + public void testSysTablesOnlyIndicesInLegacyMode() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE 'TABLE'", r -> { + assertEquals(1, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + }, index); + + } + + public void testSysTablesOnlyIndicesLegacyModeParameterized() throws Exception { + executeCommand("SYS TABLES LIKE 'test' TYPE ?", asList(param("TABLE")), r -> { + assertEquals(1, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + }, index); + } + public void 
testSysTablesOnlyIndicesParameterized() throws Exception { executeCommand("SYS TABLES LIKE 'test' TYPE ?", asList(param("ALIAS")), r -> { assertEquals(1, r.size()); @@ -114,20 +132,32 @@ public void testSysTablesOnlyIndicesParameterized() throws Exception { public void testSysTablesOnlyIndicesAndAliases() throws Exception { executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS', 'BASE TABLE'", r -> { + assertEquals("alias", r.column(2)); + assertTrue(r.advanceRow()); assertEquals(2, r.size()); assertEquals("test", r.column(2)); - assertTrue(r.advanceRow()); - assertEquals("alias", r.column(2)); }, index, alias); } public void testSysTablesOnlyIndicesAndAliasesParameterized() throws Exception { List params = asList(param("ALIAS"), param("BASE TABLE")); executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> { + assertEquals("alias", r.column(2)); + assertTrue(r.advanceRow()); assertEquals(2, r.size()); assertEquals("test", r.column(2)); - assertTrue(r.advanceRow()); + }, index, alias); + } + + public void testSysTablesOnlyIndicesLegacyAndAliasesParameterized() throws Exception { + List params = asList(param("ALIAS"), param("TABLE")); + executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> { assertEquals("alias", r.column(2)); + assertEquals("ALIAS", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); }, index, alias); } @@ -159,7 +189,7 @@ public void testSysTablesTypesEnumeration() throws Exception { executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { assertEquals(2, r.size()); - Iterator it = IndexType.VALID.iterator(); + Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); for (int t = 0; t < r.size(); t++) { assertEquals(it.next().toSql(), r.column(3)); @@ -180,7 +210,7 @@ public void testSysTablesTypesEnumerationWoString() throws Exception { executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { assertEquals(2, r.size()); - Iterator it = IndexType.VALID.iterator(); + Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); for (int t = 0; t < r.size(); t++) { assertEquals(it.next().toSql(), r.column(3)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index a6a322b31838f..8f5477f1951e9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -82,10 +82,15 @@ public void testConversionToDate() { Conversion conversion = DataTypeConversion.conversionFor(DataType.KEYWORD, to); assertNull(conversion.convert(null)); - // TODO we'd like to be able to optionally parse millis here I think.... 
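+ // millis are now parsed as well, since UTC_DATE_FORMATTER is based on dateOptionalTimeParser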
assertEquals(new DateTime(1000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:01Z")); assertEquals(new DateTime(1483228800000L, DateTimeZone.UTC), conversion.convert("2017-01-01T00:00:00Z")); assertEquals(new DateTime(18000000L, DateTimeZone.UTC), conversion.convert("1970-01-01T00:00:00-05:00")); + + // double check back and forth conversion + DateTime dt = DateTime.now(DateTimeZone.UTC); + Conversion forward = DataTypeConversion.conversionFor(DataType.DATE, DataType.KEYWORD); + Conversion back = DataTypeConversion.conversionFor(DataType.KEYWORD, DataType.DATE); + assertEquals(dt, back.convert(forward.convert(dt))); Exception e = expectThrows(SqlIllegalArgumentException.class, () -> conversion.convert("0xff")); assertEquals("cannot cast [0xff] to [Date]:Invalid format: \"0xff\" is malformed at \"xff\"", e.getMessage()); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java new file mode 100644 index 0000000000000..0a34c697bdf64 --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.type; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.xpack.sql.type.DataType.DATE; +import static org.elasticsearch.xpack.sql.type.DataType.FLOAT; +import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD; +import static org.elasticsearch.xpack.sql.type.DataType.LONG; +import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlDataType; +import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlDateTimeSub; +import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMaximumScale; +import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMinimumScale; +import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlRadix; + +public class DataTypesTests extends ESTestCase { + + public void testMetaDataType() { + assertEquals(Integer.valueOf(9), metaSqlDataType(DATE)); + DataType t = randomDataTypeNoDate(); + assertEquals(t.jdbcType.getVendorTypeNumber(), metaSqlDataType(t)); + } + + public void testMetaDateTypeSub() { + assertEquals(Integer.valueOf(3), metaSqlDateTimeSub(DATE)); + assertEquals(Integer.valueOf(0), metaSqlDateTimeSub(randomDataTypeNoDate())); + } + + public void testMetaMinimumScale() { + assertEquals(Short.valueOf((short) 3), metaSqlMinimumScale(DATE)); + assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(LONG)); + assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(FLOAT)); + assertNull(metaSqlMinimumScale(KEYWORD)); + } + + public void testMetaMaximumScale() { + assertEquals(Short.valueOf((short) 3), metaSqlMaximumScale(DATE)); + assertEquals(Short.valueOf((short) 0), metaSqlMaximumScale(LONG)); + assertEquals(Short.valueOf((short) FLOAT.defaultPrecision), metaSqlMaximumScale(FLOAT)); + assertNull(metaSqlMaximumScale(KEYWORD)); + } + + public void testMetaRadix() { + assertNull(metaSqlRadix(DATE)); + assertNull(metaSqlRadix(KEYWORD)); + assertEquals(Integer.valueOf(10), metaSqlRadix(LONG)); + assertEquals(Integer.valueOf(2), metaSqlRadix(FLOAT)); + } + + private DataType randomDataTypeNoDate() { + return 
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java
new file mode 100644
index 0000000000000..0a34c697bdf64
--- /dev/null
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.type;
+
+import org.elasticsearch.test.ESTestCase;
+
+import static org.elasticsearch.xpack.sql.type.DataType.DATE;
+import static org.elasticsearch.xpack.sql.type.DataType.FLOAT;
+import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD;
+import static org.elasticsearch.xpack.sql.type.DataType.LONG;
+import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlDataType;
+import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlDateTimeSub;
+import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMaximumScale;
+import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMinimumScale;
+import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlRadix;
+
+public class DataTypesTests extends ESTestCase {
+
+    public void testMetaDataType() {
+        assertEquals(Integer.valueOf(9), metaSqlDataType(DATE));
+        DataType t = randomDataTypeNoDate();
+        assertEquals(t.jdbcType.getVendorTypeNumber(), metaSqlDataType(t));
+    }
+
+    public void testMetaDateTypeSub() {
+        assertEquals(Integer.valueOf(3), metaSqlDateTimeSub(DATE));
+        assertEquals(Integer.valueOf(0), metaSqlDateTimeSub(randomDataTypeNoDate()));
+    }
+
+    public void testMetaMinimumScale() {
+        assertEquals(Short.valueOf((short) 3), metaSqlMinimumScale(DATE));
+        assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(LONG));
+        assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(FLOAT));
+        assertNull(metaSqlMinimumScale(KEYWORD));
+    }
+
+    public void testMetaMaximumScale() {
+        assertEquals(Short.valueOf((short) 3), metaSqlMaximumScale(DATE));
+        assertEquals(Short.valueOf((short) 0), metaSqlMaximumScale(LONG));
+        assertEquals(Short.valueOf((short) FLOAT.defaultPrecision), metaSqlMaximumScale(FLOAT));
+        assertNull(metaSqlMaximumScale(KEYWORD));
+    }
+
+    public void testMetaRadix() {
+        assertNull(metaSqlRadix(DATE));
+        assertNull(metaSqlRadix(KEYWORD));
+        assertEquals(Integer.valueOf(10), metaSqlRadix(LONG));
+        assertEquals(Integer.valueOf(2), metaSqlRadix(FLOAT));
+    }
+
+    private DataType randomDataTypeNoDate() {
+        return randomValueOtherThan(DataType.DATE, () -> randomFrom(DataType.values()));
+    }
+}
+
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java
index c5e82123d7b8b..891b11ba70bb0 100644
--- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java
+++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java
@@ -82,7 +82,7 @@ public void testDateField() {
         EsField field = mapping.get("date");
         assertThat(field.getDataType(), is(DATE));
         assertThat(field.hasDocValues(), is(true));
-        assertThat(field.getPrecision(), is(19));
+        assertThat(field.getPrecision(), is(24));
 
         DateEsField dfield = (DateEsField) field;
         List formats = dfield.getFormats();
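The precision bump from 19 to 24 matches the width of a full ISO-8601 timestamp once milliseconds and the UTC designator are included; a quick sanity check (my reading of the change, not code from the PR):

    public class DatePrecisionCheck {
        public static void main(String[] args) {
            // Seconds resolution, no millis or zone designator: the old precision.
            assert "1970-01-01T00:00:00".length() == 19;
            // Millisecond resolution plus the trailing 'Z': the new precision.
            assert "1970-01-01T00:00:00.000Z".length() == 24;
        }
    }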
the test setup superuser
-      xpack.ml.update_job:
+      xpack.ml.revert_model_snapshot:
         job_id: delete-model-snapshot
-        body:  >
-          {
-            "model_snapshot_id": "active-snapshot"
-          }
+        snapshot_id: "active-snapshot"
+
 ---
 "Test delete snapshot missing snapshotId":
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
index 6fa66667e2641..554e339687ba4 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/stats/10_basic.yml
@@ -1,13 +1,17 @@
 ---
 "Test watcher stats output":
-
+  - skip:
+      version: "all"
+      reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30298"
   - do: {xpack.watcher.stats: {}}
   - match: { "manually_stopped": false }
   - match: { "stats.0.watcher_state": "started" }
 
 ---
 "Test watcher stats supports emit_stacktraces parameter":
-
+  - skip:
+      version: "all"
+      reason: "@AwaitsFix: https://github.com/elastic/elasticsearch/issues/30298"
   - do:
       xpack.watcher.stats:
         metric: "all"
diff --git a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java
index 9f1fb95ed4835..ef5c3acc3d238 100644
--- a/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java
+++ b/x-pack/plugin/upgrade/src/test/java/org/elasticsearch/xpack/upgrade/IndexUpgradeIT.java
@@ -5,7 +5,6 @@
  */
 package org.elasticsearch.xpack.upgrade;
 
-import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.Build;
 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.action.search.SearchResponse;
@@ -31,7 +30,6 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.core.IsEqual.equalTo;
 
-@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30430")
 public class IndexUpgradeIT extends IndexUpgradeIntegTestCase {
 
     @Before
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java
index 7d9e91384e515..3754cc440eb64 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpRequest.java
@@ -511,7 +511,7 @@ public interface Field {
      * @param params The ToXContentParams from the parent write
      * @param excludeField The field to exclude
      * @return A bytearrayinputstream that contains the serialized request
-     * @throws IOException
+     * @throws IOException if an IOException is triggered in the underlying toXContent method
      */
     public static InputStream filterToXContent(HttpRequest request, XContent xContent, ToXContent.Params params,
                                                String excludeField) throws IOException {
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java
index f370847aca965..ec63a68d3cae5 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/TriggerEngine.java
@@ -37,11 +37,6 @@ public interface TriggerEngine {
      */
     void pauseExecution();
 
-    /**
-     * Returns the number of active jobs currently in this trigger engine implementation
-     */
-    int getJobCount();
-
     /**
      * Removes the job associated with the given name from this trigger engine.
      *
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java
index 386e28501210e..2d44434206d76 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/manual/ManualTriggerEngine.java
@@ -52,11 +52,6 @@ public void add(Watch job) {
     public void pauseExecution() {
     }
 
-    @Override
-    public int getJobCount() {
-        return 0;
-    }
-
     @Override
     public boolean remove(String jobId) {
         return false;
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java
index de8ab1d1f4bc6..05aa7cf302817 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java
@@ -77,11 +77,6 @@ public void add(Watch watch) {
         schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis()));
     }
 
-    @Override
-    public int getJobCount() {
-        return schedules.size();
-    }
-
     @Override
     public boolean remove(String jobId) {
         return schedules.remove(jobId) != null;
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
index 57fe40f67b4dd..58f5c8f4a26b0 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java
@@ -71,11 +71,6 @@ public void pauseExecution() {
         watches.clear();
     }
 
-    @Override
-    public int getJobCount() {
-        return watches.size();
-    }
-
     @Override
     public boolean remove(String jobId) {
         return watches.remove(jobId) != null;
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java
index 572a361d25210..df96a802166e2 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java
@@ -671,11 +671,6 @@ public void add(Watch watch) {
         public void pauseExecution() {
         }
 
-        @Override
-        public int getJobCount() {
-            return 0;
-        }
-
         @Override
         public boolean remove(String jobId) {
             return false;
diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle
index 8f5952d61edfb..b2bb7a63f6fb4 100644
--- a/x-pack/qa/full-cluster-restart/build.gradle
+++ b/x-pack/qa/full-cluster-restart/build.gradle
@@ -141,7 +141,7 @@ subprojects {
     configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
       dependsOn copyTestNodeKeystore
       if (version.before('6.3.0')) {
-        plugin xpackProject('plugin').path
+        mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
       }
       bwcVersion = version
       numBwcNodes = 2
diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
index a5fc1575f484a..14bdd533c6b38 100644
--- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
+++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java
@@ -206,6 +206,7 @@ public void testMemoryStatus() throws Exception {
         assertThat(e.getMessage(), equalTo("Cannot run forecast: Forecast cannot be executed as model memory status is not OK"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30399")
     public void testMemoryLimit() throws Exception {
         Detector.Builder detector = new Detector.Builder("mean", "value");
         detector.setByFieldName("clientIP");
diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle
index bb1b5c58c4aa5..91a6d106c98f8 100644
--- a/x-pack/qa/rolling-upgrade-basic/build.gradle
+++ b/x-pack/qa/rolling-upgrade-basic/build.gradle
@@ -82,7 +82,7 @@ for (Version version : bwcVersions.wireCompatible) {
   configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
     if (version.before('6.3.0')) {
-      plugin xpackProject('plugin').path
+      mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
     }
     bwcVersion = version
     numBwcNodes = 2
diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle
index 433dc08e1f39f..6e93041e9a0f4 100644
--- a/x-pack/qa/rolling-upgrade/build.gradle
+++ b/x-pack/qa/rolling-upgrade/build.gradle
@@ -123,7 +123,7 @@ subprojects {
     configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
       dependsOn copyTestNodeKeystore
       if (version.before('6.3.0')) {
-        plugin xpackProject('plugin').path
+        mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
       }
       String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users'
       setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
index 8eed9d5a7de4d..65b1a7c85dcb1 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java
@@ -66,7 +66,7 @@ protected Settings restClientSettings() {
     }
 
     protected Collection templatesToWaitFor() {
-        return Collections.singletonList(".security");
+        return Collections.singletonList("security-index-template");
     }
 
     @Before
diff --git a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java
index efd426439e0ab..32dd60cfa2dce 100644
--- a/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java
+++ b/x-pack/qa/sql/multinode/src/test/java/org/elasticsearch/xpack/qa/sql/multinode/RestSqlMultinodeIT.java
@@ -8,6 +8,7 @@
 import org.apache.http.HttpHost;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Strings;
@@ -53,7 +54,7 @@ public void testIndexOnWrongNode() throws IOException {
         String firstHostName = null;
         String match = firstHost.getHostName() + ":" + firstHost.getPort();
-        Map nodesInfo = responseToMap(client().performRequest("GET", "/_nodes"));
+        Map nodesInfo = responseToMap(client().performRequest(new Request("GET", "/_nodes")));
         @SuppressWarnings("unchecked")
         Map nodes = (Map) nodesInfo.get("nodes");
         for (Map.Entry node : nodes.entrySet()) {
@@ -74,7 +75,9 @@ public void testIndexOnWrongNode() throws IOException {
         }
         index.endObject();
         index.endObject();
-        client().performRequest("PUT", "/test", emptyMap(), new StringEntity(Strings.toString(index), ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/test");
+        request.setJsonEntity(Strings.toString(index));
+        client().performRequest(request);
 
         int documents = between(10, 100);
         createTestData(documents);
@@ -84,6 +87,9 @@ public void testIndexOnWrongNode() throws IOException {
     }
 
     private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
+        Request request = new Request("PUT", "/test/test/_bulk");
+        request.addParameter("refresh", "true");
+
         StringBuilder bulk = new StringBuilder();
         for (int i = 0; i < documents; i++) {
             int a = 3 * i;
@@ -92,8 +98,9 @@ private void createTestData(int documents) throws UnsupportedCharsetException, I
             bulk.append("{\"index\":{\"_id\":\"" + i + "\"}\n");
             bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
         }
-        client().performRequest("PUT", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+
+        client().performRequest(request);
     }
 
     private Map responseToMap(Response response) throws IOException {
@@ -108,14 +115,12 @@ private void assertCount(RestClient client, int count) throws IOException {
         expected.put("columns", singletonList(columnInfo(mode, "COUNT(1)", "long", JDBCType.BIGINT, 20)));
         expected.put("rows", singletonList(singletonList(count)));
 
-        Map params = new TreeMap<>();
-        params.put("format", "json"); // JSON is easier to parse then a table
-        if (Strings.hasText(mode)) {
-            params.put("mode", mode); // JDBC or PLAIN mode
+        Request request = new Request("POST", "/_xpack/sql");
+        if (false == mode.isEmpty()) {
+            request.addParameter("mode", mode);
         }
-
-        Map actual = responseToMap(client.performRequest("POST", "/_xpack/sql", params,
-                new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON)));
+        request.setJsonEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}");
+        Map actual = responseToMap(client.performRequest(request));
 
         if (false == expected.equals(actual)) {
             NotEqualMessageBuilder message = new NotEqualMessageBuilder();
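Most of the qa/sql changes in this part of the diff follow one mechanical pattern: the deprecated performRequest(method, endpoint, params, entity, headers...) overloads of the low-level REST client are replaced with the Request object introduced for it. A condensed before/after sketch of that migration (the endpoint and body here are illustrative, not taken from the PR):

    import org.apache.http.entity.ContentType;
    import org.apache.http.entity.StringEntity;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class RequestObjectMigration {
        // Old style: positional arguments, easy to mix up params, entity and headers.
        static Response oldStyle(RestClient client) throws Exception {
            return client.performRequest("PUT", "/example", java.util.Collections.singletonMap("refresh", "true"),
                    new StringEntity("{\"a\": 1}", ContentType.APPLICATION_JSON));
        }

        // New style: build the Request up incrementally, then execute it.
        static Response newStyle(RestClient client) throws Exception {
            Request request = new Request("PUT", "/example");
            request.addParameter("refresh", "true");
            request.setJsonEntity("{\"a\": 1}"); // sets the Content-Type to application/json for you
            return client.performRequest(request);
        }
    }

The new form also lets the request be built in one place and the entity attached later, which is why several tests below now create the Request at the top of the method.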
diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
index 6ac1c2c11ea9b..f7abb6f64f63c 100644
--- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
+++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java
@@ -10,6 +10,7 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.Nullable;
@@ -176,14 +177,15 @@ private static Map runSql(@Nullable String asUser, String mode,
     }
 
     private static Map runSql(@Nullable String asUser, String mode, HttpEntity entity) throws IOException {
-        Map params = new TreeMap<>();
-        params.put("format", "json"); // JSON is easier to parse then a table
-        if (Strings.hasText(mode)) {
-            params.put("mode", mode); // JDBC or PLAIN mode
+        Request request = new Request("POST", "/_xpack/sql");
+        if (false == mode.isEmpty()) {
+            request.addParameter("mode", mode);
         }
-        Header[] headers = asUser == null ? new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)};
-        Response response = client().performRequest("POST", "/_xpack/sql", params, entity, headers);
-        return toMap(response);
+        if (asUser != null) {
+            request.setHeaders(new BasicHeader("es-security-runas-user", asUser));
+        }
+        request.setEntity(entity);
+        return toMap(client().performRequest(request));
     }
 
     private static void assertResponse(Map expected, Map actual) {
@@ -234,11 +236,7 @@ public void testHijackScrollFails() throws Exception {
         createAuditLogAsserter()
             .expectSqlCompositeAction("test_admin", "test")
             .expect(true, SQL_ACTION_NAME, "full_access", empty())
-            // One scroll access denied per shard
-            .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest")
-            .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest")
-            .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest")
-            .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest")
+            // one scroll access denied per shard
            .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest")
            .assertLogs();
     }
diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java
index 205cd479dde1b..481e7a4f60f19 100644
--- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java
+++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/SqlSecurityTestCase.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.action.admin.indices.get.GetIndexAction;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
@@ -41,7 +42,6 @@
 import java.util.function.Function;
 import java.util.regex.Pattern;
 
-import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.empty;
@@ -135,6 +135,9 @@ public void oneTimeSetup() throws Exception {
              * write the test data once. */
             return;
         }
+        Request request = new Request("PUT", "/_bulk");
+        request.addParameter("refresh", "true");
+
         StringBuilder bulk = new StringBuilder();
         bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
         bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");
@@ -142,8 +145,8 @@ public void oneTimeSetup() throws Exception {
         bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
         bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
         bulk.append("{\"a\": \"test\"}\n");
-        client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
         oneTimeSetup = true;
     }
 
@@ -173,7 +176,7 @@ public void setInitialAuditLogOffset() {
     @AfterClass
     public static void wipeIndicesAfterTests() throws IOException {
         try {
-            adminClient().performRequest("DELETE", "*");
+            adminClient().performRequest(new Request("DELETE", "*"));
         } catch (ResponseException e) {
             // 404 here just means we had no indexes
             if (e.getResponse().getStatusLine().getStatusCode() != 404) {
@@ -472,13 +475,15 @@ public void testNoGetIndex() throws Exception {
     }
 
     protected static void createUser(String name, String role) throws IOException {
-        XContentBuilder user = JsonXContent.contentBuilder().prettyPrint().startObject(); {
+        Request request = new Request("PUT", "/_xpack/security/user/" + name);
+        XContentBuilder user = JsonXContent.contentBuilder().prettyPrint();
+        user.startObject(); {
             user.field("password", "testpass");
             user.field("roles", role);
         }
         user.endObject();
-        client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(),
-                new StringEntity(Strings.toString(user), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(Strings.toString(user));
+        client().performRequest(request);
     }
 
     protected AuditLogAsserter createAuditLogAsserter() {
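createUser above shows the companion pattern for structured bodies: build the JSON with an XContentBuilder, serialize it, and attach it to the Request. A self-contained sketch under the same assumptions (the role value is illustrative):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.json.JsonXContent;

    public class JsonBodyExample {
        static Request buildCreateUser(String name) throws java.io.IOException {
            XContentBuilder user = JsonXContent.contentBuilder().prettyPrint();
            user.startObject();
            {
                user.field("password", "testpass");
                user.field("roles", "some_role"); // illustrative role name
            }
            user.endObject();

            Request request = new Request("PUT", "/_xpack/security/user/" + name);
            request.setJsonEntity(Strings.toString(user)); // serialize the builder into the request body
            return request;
        }
    }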
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java
index 63795edecf855..6adf37ff325e6 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/CliIntegrationTestCase.java
@@ -5,9 +5,9 @@
  */
 package org.elasticsearch.xpack.qa.sql.cli;
 
-import org.apache.http.HttpEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 
-import static java.util.Collections.singletonMap;
 import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
 
 public abstract class CliIntegrationTestCase extends ESRestTestCase {
@@ -60,11 +59,13 @@ protected SecurityConfig securityConfig() {
     }
 
     protected void index(String index, CheckedConsumer body) throws IOException {
+        Request request = new Request("PUT", "/" + index + "/doc/1");
+        request.addParameter("refresh", "true");
         XContentBuilder builder = JsonXContent.contentBuilder().startObject();
         body.accept(builder);
         builder.endObject();
-        HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-        client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
+        request.setJsonEntity(Strings.toString(builder));
+        client().performRequest(request);
     }
 
     public String command(String command) throws IOException {
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java
index 9a5d5b9c3eaca..f93ae339a820d 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/ErrorsTestCase.java
@@ -8,8 +8,7 @@
 import java.io.IOException;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
-
-import static java.util.Collections.emptyMap;
+import org.elasticsearch.client.Request;
 
 import static org.hamcrest.Matchers.startsWith;
 
@@ -41,7 +40,9 @@ public void testSelectFromMissingIndex() throws IOException {
     @Override
     public void testSelectFromIndexWithoutTypes() throws Exception {
         // Create an index without any types
-        client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/test");
+        request.setJsonEntity("{}");
+        client().performRequest(request);
 
         assertFoundOneProblem(command("SELECT * FROM test"));
         assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine());
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java
index dc34b9c1101c7..542e71ea1841e 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/cli/FetchSizeTestCase.java
@@ -7,10 +7,10 @@
 
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 
 import java.io.IOException;
 
-import static java.util.Collections.singletonMap;
 import static org.hamcrest.Matchers.containsString;
 
 /**
@@ -18,13 +18,16 @@
 */
 public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
     public void testSelect() throws IOException {
+        Request request = new Request("PUT", "/test/doc/_bulk");
+        request.addParameter("refresh", "true");
         StringBuilder bulk = new StringBuilder();
         for (int i = 0; i < 20; i++) {
             bulk.append("{\"index\":{}}\n");
             bulk.append("{\"test_field\":" + i + "}\n");
         }
-        client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
+
         assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4"));
         assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"",
                 command("fetch separator = \" -- fetch sep -- \""));
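The bulk bodies built in these tests all follow the _bulk NDJSON convention: one action line, one source line, every line newline-terminated, with refresh=true so the documents are searchable before the test queries run. A minimal sketch of the same construction (index, type and field names are illustrative):

    import org.elasticsearch.client.Request;

    public class BulkBodyExample {
        static Request bulkRequest(int documents) {
            // Each document needs an action line and a source line, both newline-terminated.
            StringBuilder bulk = new StringBuilder();
            for (int i = 0; i < documents; i++) {
                bulk.append("{\"index\":{}}\n");              // action: index with auto-generated id
                bulk.append("{\"test_field\":" + i + "}\n");  // source document
            }
            Request request = new Request("PUT", "/test/doc/_bulk");
            request.addParameter("refresh", "true"); // make the docs visible to the queries that follow
            request.setJsonEntity(bulk.toString());
            return request;
        }
    }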
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java
index 9137e2028aa50..f3fdd8e267ac3 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/DataLoader.java
@@ -8,6 +8,7 @@
 import org.apache.http.HttpHost;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.Strings;
@@ -55,6 +56,7 @@ private static void createString(String name, XContentBuilder builder) throws Ex
             .endObject();
     }
     protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception {
+        Request request = new Request("PUT", "/" + index);
         XContentBuilder createIndex = JsonXContent.contentBuilder().startObject();
         createIndex.startObject("settings");
         {
@@ -91,11 +93,9 @@ protected static void loadDatasetIntoEs(RestClient client, String index) throws
             createIndex.endObject();
         }
         createIndex.endObject().endObject();
-
-        client.performRequest("PUT", "/" + index, emptyMap(), new StringEntity(Strings.toString(createIndex),
-                ContentType.APPLICATION_JSON));
+        request.setJsonEntity(Strings.toString(createIndex));
+        client.performRequest(request);
-
         Map deps = new LinkedHashMap<>();
         csvToLines("departments", (titles, fields) -> deps.put(fields.get(0), fields.get(1)));
@@ -119,6 +119,8 @@ protected static void loadDatasetIntoEs(RestClient client, String index) throws
             list.add(dep);
         });
 
+        request = new Request("POST", "/" + index + "/emp/_bulk");
+        request.addParameter("refresh", "true");
         StringBuilder bulk = new StringBuilder();
         csvToLines("employees", (titles, fields) -> {
             bulk.append("{\"index\":{}}\n");
@@ -146,17 +148,16 @@ protected static void loadDatasetIntoEs(RestClient client, String index) throws
                 bulk.setLength(bulk.length() - 1);
                 bulk.append("]");
             }
-
+            bulk.append("}\n");
         });
-
-        client.performRequest("POST", "/" + index + "/emp/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client.performRequest(request);
     }
 
     protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception {
         for (String index : indices) {
-            client.performRequest("POST", "/" + index + "/_alias/" + aliasName);
+            client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName));
        }
    }
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java
index 0fffb0dac4c3b..ea6c5f165ee6f 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/ErrorsTestCase.java
@@ -9,8 +9,7 @@
 import java.sql.SQLException;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
-
-import static java.util.Collections.emptyMap;
+import org.elasticsearch.client.Request;
 
 import static org.hamcrest.Matchers.startsWith;
 
@@ -37,7 +36,9 @@ public void testSelectFromMissingIndex() throws SQLException {
     @Override
     public void testSelectFromIndexWithoutTypes() throws Exception {
         // Create an index without any types
-        client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/test");
+        request.setJsonEntity("{}");
+        client().performRequest(request);
 
         try (Connection c = esJdbc()) {
             SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery());
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java
index de7cf465acacf..4d2487a0c03ff 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/FetchSizeTestCase.java
@@ -7,6 +7,7 @@
 
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -15,7 +16,6 @@
 import java.sql.SQLException;
 import java.sql.Statement;
 
-import static java.util.Collections.singletonMap;
 import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
 
 /**
@@ -25,13 +25,15 @@ public class FetchSizeTestCase extends JdbcIntegrationTestCase {
     @Before
     public void createTestIndex() throws IOException {
+        Request request = new Request("PUT", "/test/doc/_bulk");
+        request.addParameter("refresh", "true");
         StringBuilder bulk = new StringBuilder();
         for (int i = 0; i < 20; i++) {
             bulk.append("{\"index\":{}}\n");
             bulk.append("{\"test_field\":" + i + "}\n");
         }
-        client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
     }
 
     /**
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java
index aa5dc5c0ac2b6..fc0cd67efac14 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/JdbcIntegrationTestCase.java
@@ -9,6 +9,7 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -85,16 +86,18 @@ protected Connection useDataSource() throws SQLException {
     }
 
     public static void index(String index, CheckedConsumer body) throws IOException {
+        Request request = new Request("PUT", "/" + index + "/doc/1");
+        request.addParameter("refresh", "true");
         XContentBuilder builder = JsonXContent.contentBuilder().startObject();
         body.accept(builder);
         builder.endObject();
-        HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
-        client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
+        request.setJsonEntity(Strings.toString(builder));
+        client().performRequest(request);
     }
 
     protected String clusterName() {
         try {
-            String response = EntityUtils.toString(client().performRequest("GET", "/").getEntity());
+            String response = EntityUtils.toString(client().performRequest(new Request("GET", "/")).getEntity());
             return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString();
         } catch (IOException e) {
             throw new RuntimeException(e);
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java
index 5a589f94d28d4..d8ba1ade959ae 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc/SpecBaseIntegrationTestCase.java
@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.qa.sql.jdbc;
 
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
@@ -49,7 +50,7 @@ public SpecBaseIntegrationTestCase(String fileName, String groupName, String tes
 
     @Before
     public void setupTestDataIfNeeded() throws Exception {
-        if (client().performRequest("HEAD", "/test_emp").getStatusLine().getStatusCode() == 404) {
+        if (client().performRequest(new Request("HEAD", "/test_emp")).getStatusLine().getStatusCode() == 404) {
             DataLoader.loadDatasetIntoEs(client());
         }
     }
@@ -62,7 +63,7 @@ protected boolean preserveIndicesUponCompletion() {
     @AfterClass
     public static void wipeTestData() throws IOException {
         try {
-            adminClient().performRequest("DELETE", "/*");
+            adminClient().performRequest(new Request("DELETE", "/*"));
         } catch (ResponseException e) {
             // 404 here just means we had no indexes
             if (e.getResponse().getStatusLine().getStatusCode() != 404) {
diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
index 8062d7af497de..3019a00351c28 100644
--- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
+++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java
@@ -12,6 +12,7 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.common.CheckedSupplier;
@@ -74,16 +75,19 @@ public void testBasicQuery() throws IOException {
     }
 
     public void testNextPage() throws IOException {
+        Request request = new Request("POST", "/test/test/_bulk");
+        request.addParameter("refresh", "true");
         String mode = randomMode();
         StringBuilder bulk = new StringBuilder();
         for (int i = 0; i < 20; i++) {
             bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
             bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
         }
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
 
-        String request = "{\"query\":\""
+        String sqlRequest =
+                "{\"query\":\""
                 + "   SELECT text, number, SQRT(number) AS s, SCORE()"
                 + "     FROM test"
                 + " ORDER BY number, SCORE()\", "
@@ -94,7 +98,7 @@ public void testNextPage() throws IOException {
         for (int i = 0; i < 20; i += 2) {
             Map response;
             if (i == 0) {
-                response = runSql(mode, new StringEntity(request, ContentType.APPLICATION_JSON));
+                response = runSql(mode, new StringEntity(sqlRequest, ContentType.APPLICATION_JSON));
             } else {
                 response = runSql(mode, new StringEntity("{\"cursor\":\"" + cursor + "\"}",
                         ContentType.APPLICATION_JSON));
@@ -138,12 +142,14 @@ public void testTimeZone() throws IOException {
     }
 
     public void testScoreWithFieldNamedScore() throws IOException {
+        Request request = new Request("POST", "/test/test/_bulk");
+        request.addParameter("refresh", "true");
         String mode = randomMode();
         StringBuilder bulk = new StringBuilder();
         bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
         bulk.append("{\"name\":\"test\", \"score\":10}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
 
         Map expected = new HashMap<>();
         expected.put("columns", Arrays.asList(
@@ -209,7 +215,9 @@ public void testSelectFromMissingIndex() {
     @Override
     public void testSelectFromIndexWithoutTypes() throws Exception {
         // Create an index without any types
-        client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
+        Request request = new Request("PUT", "/test");
+        request.setJsonEntity("{}");
+        client().performRequest(request);
         String mode = randomFrom("jdbc", "plain");
         expectBadRequest(() -> runSql(mode, "SELECT * FROM test"),
                 containsString("1:15: [test] doesn't have any types so it is incompatible with sql"));
@@ -229,24 +237,9 @@ public void testSelectMissingFunction() throws Exception {
                 containsString("1:8: Unknown function [missing]"));
     }
 
-    private void index(String... docs) throws IOException {
-        StringBuilder bulk = new StringBuilder();
-        for (String doc : docs) {
-            bulk.append("{\"index\":{}\n");
-            bulk.append(doc + "\n");
-        }
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-    }
-
     @Override
     public void testSelectProjectScoreInAggContext() throws Exception {
-        StringBuilder bulk = new StringBuilder();
-        bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
-        bulk.append("{\"foo\":1}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-
+        index("{\"foo\":1}");
         expectBadRequest(() -> runSql(randomMode(),
             "  SELECT foo, SCORE(), COUNT(*)"
             + " FROM test"
@@ -256,12 +249,7 @@ public void testSelectProjectScoreInAggContext() throws Exception {
 
     @Override
     public void testSelectOrderByScoreInAggContext() throws Exception {
-        StringBuilder bulk = new StringBuilder();
-        bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
-        bulk.append("{\"foo\":1}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-
+        index("{\"foo\":1}");
         expectBadRequest(() -> runSql(randomMode(),
             "  SELECT foo, COUNT(*)"
             + " FROM test"
@@ -272,36 +260,21 @@ public void testSelectOrderByScoreInAggContext() throws Exception {
 
     @Override
     public void testSelectGroupByScore() throws Exception {
-        StringBuilder bulk = new StringBuilder();
-        bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
-        bulk.append("{\"foo\":1}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-
+        index("{\"foo\":1}");
         expectBadRequest(() -> runSql(randomMode(), "SELECT COUNT(*) FROM test GROUP BY SCORE()"),
                 containsString("Cannot use [SCORE()] for grouping"));
     }
 
     @Override
     public void testSelectScoreSubField() throws Exception {
-        StringBuilder bulk = new StringBuilder();
-        bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
-        bulk.append("{\"foo\":1}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-                new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-
+        index("{\"foo\":1}");
         expectBadRequest(() -> runSql(randomMode(), "SELECT SCORE().bar FROM test"),
             containsString("line 1:15: extraneous input '.' expecting {, ','"));
     }
 
     @Override
     public void testSelectScoreInScalar() throws Exception {
-        StringBuilder bulk = new StringBuilder();
-        bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
-        bulk.append("{\"foo\":1}\n");
-        client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
-            new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
-
+        index("{\"foo\":1}");
         expectBadRequest(() -> runSql(randomMode(), "SELECT SIN(SCORE()) FROM test"),
             containsString("line 1:12: [SCORE()] cannot be an argument to a function"));
     }
@@ -340,37 +313,32 @@ private Map runSql(String mode, HttpEntity sql) throws IOExcepti
     }
 
     private Map runSql(String mode, HttpEntity sql, String suffix) throws IOException {
-        Map params = new TreeMap<>();
-        params.put("error_trace", "true"); // Helps with debugging in case something crazy happens on the server.
- params.put("pretty", "true"); // Improves error reporting readability + Request request = new Request("POST", "/_xpack/sql" + suffix); + request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server. + request.addParameter("pretty", "true"); // Improves error reporting readability if (randomBoolean()) { // We default to JSON but we force it randomly for extra coverage - params.put("format", "json"); + request.addParameter("format", "json"); } - if (Strings.hasText(mode)) { - params.put("mode", mode); // JDBC or PLAIN mode + if (false == mode.isEmpty()) { + request.addParameter("mode", mode); // JDBC or PLAIN mode } - Header[] headers = randomFrom( + request.setHeaders(randomFrom( new Header[] {}, new Header[] {new BasicHeader("Accept", "*/*")}, - new Header[] {new BasicHeader("Accpet", "application/json")}); - Response response = client().performRequest("POST", "/_xpack/sql" + suffix, params, sql); + new Header[] {new BasicHeader("Accpet", "application/json")})); + request.setEntity(sql); + Response response = client().performRequest(request); try (InputStream content = response.getEntity().getContent()) { return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); } } public void testBasicTranslateQuery() throws IOException { - StringBuilder bulk = new StringBuilder(); - bulk.append("{\"index\":{\"_id\":\"1\"}}\n"); - bulk.append("{\"test\":\"test\"}\n"); - bulk.append("{\"index\":{\"_id\":\"2\"}}\n"); - bulk.append("{\"test\":\"test\"}\n"); - client().performRequest("POST", "/test_translate/test/_bulk", singletonMap("refresh", "true"), - new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + index("{\"test\":\"test\"}", "{\"test\":\"test\"}"); - Map response = runSql(randomMode(), "SELECT * FROM test_translate", "/translate/"); - assertEquals(response.get("size"), 1000); + Map response = runSql(randomMode(), "SELECT * FROM test", "/translate/"); + assertEquals(1000, response.get("size")); @SuppressWarnings("unchecked") Map source = (Map) response.get("_source"); assertNotNull(source); @@ -459,13 +427,12 @@ public void testBasicQueryText() throws IOException { } public void testNextPageText() throws IOException { - StringBuilder bulk = new StringBuilder(); - for (int i = 0; i < 20; i++) { - bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n"); - bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"); + int size = 20; + String[] docs = new String[size]; + for (int i = 0; i < size; i++) { + docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n"; } - client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"), - new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON)); + index(docs); String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}"; @@ -563,23 +530,33 @@ private Tuple runSqlAsText(String sql, String accept) throws IOE return runSqlAsText("", new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept); } + /** + * Run SQL as text using the {@code Accept} header to specify the format + * rather than the {@code format} parameter. 
+     */
     private Tuple runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException {
-        Response response = client().performRequest("POST", "/_xpack/sql" + suffix, singletonMap("error_trace", "true"),
-                entity, new BasicHeader("Accept", accept));
+        Request request = new Request("POST", "/_xpack/sql" + suffix);
+        request.addParameter("error_trace", "true");
+        request.setEntity(entity);
+        request.setHeaders(new BasicHeader("Accept", accept));
+        Response response = client().performRequest(request);
         return new Tuple<>(
                 Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
                 response.getHeader("Cursor")
         );
     }
 
+    /**
+     * Run SQL as text using the {@code format} parameter to specify the format
+     * rather than an {@code Accept} header.
+     */
     private Tuple runSqlAsTextFormat(String sql, String format) throws IOException {
-        StringEntity entity = new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON);
-
-        Map params = new HashMap<>();
-        params.put("error_trace", "true");
-        params.put("format", format);
+        Request request = new Request("POST", "/_xpack/sql");
+        request.addParameter("error_trace", "true");
+        request.addParameter("format", format);
+        request.setJsonEntity("{\"query\":\"" + sql + "\"}");
 
-        Response response = client().performRequest("POST", "/_xpack/sql", params, entity);
+        Response response = client().performRequest(request);
         return new Tuple<>(
                 Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
                 response.getHeader("Cursor")
@@ -595,23 +572,14 @@ private void assertResponse(Map expected, Map ac
     }
 
     public static int getNumberOfSearchContexts(String index) throws IOException {
-        Response response = client().performRequest("GET", "/_stats/search");
-        Map stats;
-        try (InputStream content = response.getEntity().getContent()) {
-            stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
-        }
-        return getOpenContexts(stats, index);
+        return getOpenContexts(searchStats(), index);
     }
 
     public static void assertNoSearchContexts() throws IOException {
-        Response response = client().performRequest("GET", "/_stats/search");
-        Map stats;
-        try (InputStream content = response.getEntity().getContent()) {
-            stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
-        }
+        Map stats = searchStats();
         @SuppressWarnings("unchecked")
-        Map indexStats = (Map) stats.get("indices");
-        for (String index : indexStats.keySet()) {
+        Map indicesStats = (Map) stats.get("indices");
+        for (String index : indicesStats.keySet()) {
             if (index.startsWith(".") == false) {
                 // We are not interested in internal indices
                 assertEquals(index + " should have no search contexts", 0, getOpenContexts(stats, index));
             }
@@ -619,12 +587,34 @@ public static void assertNoSearchContexts() throws IOException {
     }
 
     @SuppressWarnings("unchecked")
-    public static int getOpenContexts(Map indexStats, String index) {
-        return (int) ((Map) ((Map) ((Map) ((Map)
-                indexStats.get("indices")).get(index)).get("total")).get("search")).get("open_contexts");
+    private static int getOpenContexts(Map stats, String index) {
+        stats = (Map) stats.get("indices");
+        stats = (Map) stats.get(index);
+        stats = (Map) stats.get("total");
+        stats = (Map) stats.get("search");
+        return (Integer) stats.get("open_contexts");
+    }
+
+    private static Map searchStats() throws IOException {
+        Response response = client().performRequest(new Request("GET", "/_stats/search"));
+        try (InputStream content = response.getEntity().getContent()) {
+            return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
+        }
     }
 
     public static String randomMode() {
         return randomFrom("", "jdbc", "plain");
     }
+
+    private void index(String... docs) throws IOException {
+        Request request = new Request("POST", "/test/test/_bulk");
+        request.addParameter("refresh", "true");
+        StringBuilder bulk = new StringBuilder();
+        for (String doc : docs) {
+            bulk.append("{\"index\":{}}\n");
+            bulk.append(doc + "\n");
+        }
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
+    }
 }
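The rewritten getOpenContexts walks the stats JSON one level per statement instead of one nested chain of casts, so a failure points at the exact missing key. The same technique in isolation (the hand-built map below mirrors a trimmed _stats/search response, not a real one):

    import java.util.HashMap;
    import java.util.Map;

    public class NestedMapWalk {
        @SuppressWarnings("unchecked")
        static int openContexts(Map<String, Object> stats, String index) {
            // Step down one level at a time.
            stats = (Map<String, Object>) stats.get("indices");
            stats = (Map<String, Object>) stats.get(index);
            stats = (Map<String, Object>) stats.get("total");
            stats = (Map<String, Object>) stats.get("search");
            return (Integer) stats.get("open_contexts");
        }

        public static void main(String[] args) {
            // Stand-in for a trimmed _stats/search response.
            Map<String, Object> search = new HashMap<>();
            search.put("open_contexts", 0);
            Map<String, Object> total = new HashMap<>();
            total.put("search", search);
            Map<String, Object> index = new HashMap<>();
            index.put("total", total);
            Map<String, Object> indices = new HashMap<>();
            indices.put("test", index);
            Map<String, Object> stats = new HashMap<>();
            stats.put("indices", indices);

            System.out.println(openContexts(stats, "test")); // 0
        }
    }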
diff --git a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql
index 3d8cf4708945e..69c572f4ddd4e 100644
--- a/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql
+++ b/x-pack/qa/sql/src/main/resources/setup_mock_metadata_get_columns.sql
@@ -25,26 +25,26 @@ CREATE TABLE mock (
 ) AS
 SELECT null, 'test1', 'name', 12, 'TEXT', 0, 2147483647, null, null,
   1, -- columnNullable
-  null, null, 12, null, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
+  null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 UNION ALL
 SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 0, 2147483647, null, null,
   1, -- columnNullable
-  null, null, 12, null, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
+  null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 UNION ALL
-SELECT null, 'test2', 'date', 93, 'DATE', 20, 8, null, null,
+SELECT null, 'test2', 'date', 93, 'DATE', 24, 8, null, null,
   1, -- columnNullable
-  null, null, 93, null, null, 1, 'YES', null, null, null, null, 'NO', 'NO'
+  null, null, 9, 3, null, 1, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 UNION ALL
 SELECT null, 'test2', 'float', 7, 'FLOAT', 15, 4, null, 2,
   1, -- columnNullable
-  null, null, 7, null, null, 2, 'YES', null, null, null, null, 'NO', 'NO'
+  null, null, 7, 0, null, 2, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 UNION ALL
 SELECT null, 'test2', 'number', -5, 'LONG', 20, 8, null, 10,
   1, -- columnNullable
-  null, null, -5, 0, null, 3, 'YES', null, null, null, null, 'NO', 'NO'
+  null, null, -5, 0, null, 3, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 ;
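The mock getColumns rows above line up with the new DataTypesTests earlier in the diff: for the DATE column the changed pair becomes 9 and 3, the same values the tests expect from metaSqlDataType(DATE) and metaSqlDateTimeSub(DATE), and the column size becomes the 24-character ISO timestamp width. A hedged restatement of that correspondence (my reading of the values, not code from the PR):

    public class DateColumnMetadata {
        public static void main(String[] args) {
            int columnSize = 24;     // "1970-01-01T00:00:00.000Z".length()
            int sqlDataType = 9;     // matches metaSqlDataType(DATE) in DataTypesTests
            int sqlDatetimeSub = 3;  // matches metaSqlDateTimeSub(DATE) in DataTypesTests
            assert "1970-01-01T00:00:00.000Z".length() == columnSize;
            System.out.println(sqlDataType + "/" + sqlDatetimeSub + " at precision " + columnSize);
        }
    }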